/* drivers/usb/misc/usbtest.c */


#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/usb.h>
#define SIMPLE_IO_TIMEOUT 10000 /* in milliseconds */
/*-------------------------------------------------------------------------*/
static int override_alt = -1;
module_param_named(alt, override_alt, int, 0644);
MODULE_PARM_DESC(alt, ">= 0 to override altsetting selection");
/*-------------------------------------------------------------------------*/
/* FIXME make these public somewhere; usbdevfs.h? */
struct usbtest_param {
/* inputs */
unsigned test_num; /* 0..(TEST_CASES-1) */
unsigned iterations;
unsigned length;
unsigned vary;
unsigned sglen;
/* outputs */
struct timeval duration;
};
#define USBTEST_REQUEST _IOWR('U', 100, struct usbtest_param)
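/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * a testusb-style program reaches this driver through usbfs, wrapping the
 * request in USBDEVFS_IOCTL so it is routed to the interface bound to
 * usbtest.  Only USBTEST_REQUEST and struct usbtest_param come from this
 * file; the file descriptor and interface number are placeholders.
 *
 *	struct usbtest_param param = {
 *		.test_num   = 10,
 *		.iterations = 1000,
 *		.length     = 512,
 *		.sglen      = 32,
 *	};
 *	struct usbdevfs_ioctl wrapper = {
 *		.ifno       = 0,
 *		.ioctl_code = USBTEST_REQUEST,
 *		.data       = &param,
 *	};
 *
 *	if (ioctl(usbfs_fd, USBDEVFS_IOCTL, &wrapper) < 0)
 *		perror("USBTEST_REQUEST");
 *	// on success, param.duration reports how long the test ran
 */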
/*-------------------------------------------------------------------------*/
#define GENERIC /* let probe() bind using module params */
/* Some devices that can be used for testing will have "real" drivers.
* Entries for those need to be enabled here by hand, after disabling
* that "real" driver.
*/
//#define IBOT2 /* grab iBOT2 webcams */
//#define KEYSPAN_19Qi /* grab un-renumerated serial adapter */
/*-------------------------------------------------------------------------*/
struct usbtest_info {
const char *name;
u8 ep_in; /* bulk/intr source */
u8 ep_out; /* bulk/intr sink */
unsigned autoconf:1;
unsigned ctrl_out:1;
unsigned iso:1; /* try iso in/out */
unsigned intr:1; /* try interrupt in/out */
int alt;
};
/* this is accessed only through usbfs ioctl calls.
* one ioctl to issue a test ... one lock per device.
* tests create other threads if they need them.
* urbs and buffers are allocated dynamically,
* and data generated deterministically.
*/
struct usbtest_dev {
struct usb_interface *intf;
struct usbtest_info *info;
int in_pipe;
int out_pipe;
int in_iso_pipe;
int out_iso_pipe;
int in_int_pipe;
int out_int_pipe;
struct usb_endpoint_descriptor *iso_in, *iso_out;
struct usb_endpoint_descriptor *int_in, *int_out;
struct mutex lock;
#define TBUF_SIZE 256
u8 *buf;
};
static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
{
return interface_to_usbdev(test->intf);
}
/* set up all urbs so they can be used with either bulk or interrupt */
#define INTERRUPT_RATE 1 /* msec/transfer */
#define ERROR(tdev, fmt, args...) \
dev_err(&(tdev)->intf->dev , fmt , ## args)
#define WARNING(tdev, fmt, args...) \
dev_warn(&(tdev)->intf->dev , fmt , ## args)
#define GUARD_BYTE 0xA5
/*-------------------------------------------------------------------------*/
static int
get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
{
int tmp;
struct usb_host_interface *alt;
struct usb_host_endpoint *in, *out;
struct usb_host_endpoint *iso_in, *iso_out;
struct usb_host_endpoint *int_in, *int_out;
struct usb_device *udev;
for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
unsigned ep;
in = out = NULL;
iso_in = iso_out = NULL;
int_in = int_out = NULL;
alt = intf->altsetting + tmp;
if (override_alt >= 0 &&
override_alt != alt->desc.bAlternateSetting)
continue;
/* take the first altsetting with in-bulk + out-bulk;
* ignore other endpoints and altsettings.
*/
for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
struct usb_host_endpoint *e;
e = alt->endpoint + ep;
switch (usb_endpoint_type(&e->desc)) {
case USB_ENDPOINT_XFER_BULK:
break;
case USB_ENDPOINT_XFER_INT:
if (dev->info->intr)
goto try_intr;
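/* FALLTHROUGH */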
case USB_ENDPOINT_XFER_ISOC:
if (dev->info->iso)
goto try_iso;
/* FALLTHROUGH */
default:
continue;
}
if (usb_endpoint_dir_in(&e->desc)) {
if (!in)
in = e;
} else {
if (!out)
out = e;
}
continue;
try_intr:
if (usb_endpoint_dir_in(&e->desc)) {
if (!int_in)
int_in = e;
} else {
if (!int_out)
int_out = e;
}
continue;
try_iso:
if (usb_endpoint_dir_in(&e->desc)) {
if (!iso_in)
iso_in = e;
} else {
if (!iso_out)
iso_out = e;
}
}
if ((in && out) || iso_in || iso_out || int_in || int_out)
goto found;
}
return -EINVAL;
found:
udev = testdev_to_usbdev(dev);
dev->info->alt = alt->desc.bAlternateSetting;
if (alt->desc.bAlternateSetting != 0) {
tmp = usb_set_interface(udev,
alt->desc.bInterfaceNumber,
alt->desc.bAlternateSetting);
if (tmp < 0)
return tmp;
}
if (in) {
dev->in_pipe = usb_rcvbulkpipe(udev,
in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
dev->out_pipe = usb_sndbulkpipe(udev,
out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
}
if (iso_in) {
dev->iso_in = &iso_in->desc;
dev->in_iso_pipe = usb_rcvisocpipe(udev,
iso_in->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
}
if (iso_out) {
dev->iso_out = &iso_out->desc;
dev->out_iso_pipe = usb_sndisocpipe(udev,
iso_out->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
}
if (int_in) {
dev->int_in = &int_in->desc;
dev->in_int_pipe = usb_rcvintpipe(udev,
int_in->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
}
if (int_out) {
dev->int_out = &int_out->desc;
dev->out_int_pipe = usb_sndintpipe(udev,
int_out->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
}
return 0;
}
/*-------------------------------------------------------------------------*/
/* Support for testing basic non-queued I/O streams.
*
* These just package urbs as requests that can be easily canceled.
* Each urb's data buffer is dynamically allocated; callers can fill
* them with non-zero test data (or test for it) when appropriate.
*/
static void simple_callback(struct urb *urb)
{
complete(urb->context);
}
static struct urb *usbtest_alloc_urb(
struct usb_device *udev,
int pipe,
unsigned long bytes,
unsigned transfer_flags,
unsigned offset,
u8 bInterval)
{
struct urb *urb;
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return urb;
if (bInterval)
usb_fill_int_urb(urb, udev, pipe, NULL, bytes, simple_callback,
NULL, bInterval);
else
usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback,
NULL);
urb->interval = (udev->speed == USB_SPEED_HIGH)
? (INTERRUPT_RATE << 3)
: INTERRUPT_RATE;
urb->transfer_flags = transfer_flags;
if (usb_pipein(pipe))
urb->transfer_flags |= URB_SHORT_NOT_OK;
if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
GFP_KERNEL, &urb->transfer_dma);
else
urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
if (!urb->transfer_buffer) {
usb_free_urb(urb);
return NULL;
}
/* To test unaligned transfers add an offset and fill the
unused memory with a guard value */
if (offset) {
memset(urb->transfer_buffer, GUARD_BYTE, offset);
urb->transfer_buffer += offset;
if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
urb->transfer_dma += offset;
}
/* For inbound transfers use guard byte so that test fails if
data not correctly copied */
memset(urb->transfer_buffer,
usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
bytes);
return urb;
}
static struct urb *simple_alloc_urb(
struct usb_device *udev,
int pipe,
unsigned long bytes,
u8 bInterval)
{
return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0,
bInterval);
}
static unsigned pattern;
static unsigned mod_pattern;
module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pattern, "i/o pattern (0 == zeroes)");
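/*
 * pattern 1 ("mod63") fills buffers with the byte sequence 0, 1, ... 62,
 * repeating; a 512-byte buffer carries eight full 0..62 runs and ends
 * with 0..7, so packet boundaries never line up with the pattern and
 * dropped, repeated or stale data shows up in simple_check_buf().
 */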
static inline void simple_fill_buf(struct urb *urb)
{
unsigned i;
u8 *buf = urb->transfer_buffer;
unsigned len = urb->transfer_buffer_length;
switch (pattern) {
default:
/* FALLTHROUGH */
case 0:
memset(buf, 0, len);
break;
case 1: /* mod63 */
for (i = 0; i < len; i++)
*buf++ = (u8) (i % 63);
break;
}
}
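/*
 * Recover the unaligned-test offset that usbtest_alloc_urb() may have
 * added: this assumes the underlying allocation is at least
 * ARCH_KMALLOC_MINALIGN aligned and that the offset stays below that,
 * so the low pointer bits are exactly the offset.
 */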
static inline unsigned long buffer_offset(void *buf)
{
return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
}
static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
{
u8 *buf = urb->transfer_buffer;
u8 *guard = buf - buffer_offset(buf);
unsigned i;
for (i = 0; guard < buf; i++, guard++) {
if (*guard != GUARD_BYTE) {
ERROR(tdev, "guard byte[%d] %d (not %d)\n",
i, *guard, GUARD_BYTE);
return -EINVAL;
}
}
return 0;
}
static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
{
unsigned i;
u8 expected;
u8 *buf = urb->transfer_buffer;
unsigned len = urb->actual_length;
int ret = check_guard_bytes(tdev, urb);
if (ret)
return ret;
for (i = 0; i < len; i++, buf++) {
switch (pattern) {
/* all-zeroes has no synchronization issues */
case 0:
expected = 0;
break;
/* mod63 stays in sync with short-terminated transfers,
* or otherwise when host and gadget agree on how large
* each usb transfer request should be. resync is done
* with set_interface or set_config.
*/
case 1: /* mod63 */
expected = i % 63;
break;
/* always fail unsupported patterns */
default:
expected = !*buf;
break;
}
if (*buf == expected)
continue;
ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
return -EINVAL;
}
return 0;
}
static void simple_free_urb(struct urb *urb)
{
unsigned long offset = buffer_offset(urb->transfer_buffer);
if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
usb_free_coherent(
urb->dev,
urb->transfer_buffer_length + offset,
urb->transfer_buffer - offset,
urb->transfer_dma - offset);
else
kfree(urb->transfer_buffer - offset);
usb_free_urb(urb);
}
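/*
 * Synchronously run one urb through 'iterations' submit/complete cycles,
 * giving each pass up to SIMPLE_IO_TIMEOUT to finish.  OUT buffers are
 * refilled with the current pattern before every submit, IN buffers are
 * verified after completion, and a nonzero 'vary' steps the transfer
 * length between passes.  Returns the last status, logging an error if
 * it differs from 'expected'.
 */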
static int simple_io(
struct usbtest_dev *tdev,
struct urb *urb,
int iterations,
int vary,
int expected,
const char *label
)
{
struct usb_device *udev = urb->dev;
int max = urb->transfer_buffer_length;
struct completion completion;
int retval = 0;
unsigned long expire;
urb->context = &completion;
while (retval == 0 && iterations-- > 0) {
init_completion(&completion);
if (usb_pipeout(urb->pipe)) {
simple_fill_buf(urb);
urb->transfer_flags |= URB_ZERO_PACKET;
}
retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval != 0)
break;
expire = msecs_to_jiffies(SIMPLE_IO_TIMEOUT);
if (!wait_for_completion_timeout(&completion, expire)) {
usb_kill_urb(urb);
retval = (urb->status == -ENOENT ?
-ETIMEDOUT : urb->status);
} else {
retval = urb->status;
}
urb->dev = udev;
if (retval == 0 && usb_pipein(urb->pipe))
retval = simple_check_buf(tdev, urb);
if (vary) {
int len = urb->transfer_buffer_length;
len += vary;
len %= max;
if (len == 0)
len = (vary < max) ? vary : max;
urb->transfer_buffer_length = len;
}
/* FIXME if endpoint halted, clear halt (and log) */
}
urb->transfer_buffer_length = max;
if (expected != retval)
dev_err(&udev->dev,
"%s failed, iterations left %d, status %d (not %d)\n",
label, iterations, retval, expected);
return retval;
}
/*-------------------------------------------------------------------------*/
/* We use scatterlist primitives to test queued I/O.
* Yes, this also tests the scatterlist primitives.
*/
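/*
 * alloc_sglist() builds 'nents' separate kmalloc'd buffers (optionally
 * pattern-filled, with lengths stepped by 'vary'), and perform_sglist()
 * drives them through usb_sg_init()/usb_sg_wait() for each iteration,
 * with an on-stack timer cancelling the request if it outlives
 * SIMPLE_IO_TIMEOUT.
 */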
static void free_sglist(struct scatterlist *sg, int nents)
{
unsigned i;
if (!sg)
return;
for (i = 0; i < nents; i++) {
if (!sg_page(&sg[i]))
continue;
kfree(sg_virt(&sg[i]));
}
kfree(sg);
}
static struct scatterlist *
alloc_sglist(int nents, int max, int vary)
{
struct scatterlist *sg;
unsigned i;
unsigned size = max;
if (max == 0)
return NULL;
sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
if (!sg)
return NULL;
sg_init_table(sg, nents);
for (i = 0; i < nents; i++) {
char *buf;
unsigned j;
buf = kzalloc(size, GFP_KERNEL);
if (!buf) {
free_sglist(sg, i);
return NULL;
}
/* kmalloc pages are always physically contiguous! */
sg_set_buf(&sg[i], buf, size);
switch (pattern) {
case 0:
/* already zeroed */
break;
case 1:
for (j = 0; j < size; j++)
*buf++ = (u8) (j % 63);
break;
}
if (vary) {
size += vary;
size %= max;
if (size == 0)
size = (vary < max) ? vary : max;
}
}
return sg;
}
static void sg_timeout(unsigned long _req)
{
struct usb_sg_request *req = (struct usb_sg_request *) _req;
req->status = -ETIMEDOUT;
usb_sg_cancel(req);
}
static int perform_sglist(
struct usbtest_dev *tdev,
unsigned iterations,
int pipe,
struct usb_sg_request *req,
struct scatterlist *sg,
int nents
)
{
struct usb_device *udev = testdev_to_usbdev(tdev);
int retval = 0;
struct timer_list sg_timer;
setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long) req);
while (retval == 0 && iterations-- > 0) {
retval = usb_sg_init(req, udev, pipe,
(udev->speed == USB_SPEED_HIGH)
? (INTERRUPT_RATE << 3)
: INTERRUPT_RATE,
sg, nents, 0, GFP_KERNEL);
if (retval)
break;
mod_timer(&sg_timer, jiffies +
msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
usb_sg_wait(req);
del_timer_sync(&sg_timer);
retval = req->status;
/* FIXME check resulting data pattern */
/* FIXME if endpoint halted, clear halt (and log) */
}
/* FIXME for unlink or fault handling tests, don't report
* failure if retval is as we expected ...
*/
if (retval)
ERROR(tdev, "perform_sglist failed, "
"iterations left %d, status %d\n",
iterations, retval);
return retval;
}
/*-------------------------------------------------------------------------*/
/* unqueued control message testing
*
* there's a nice set of device functional requirements in chapter 9 of the
* usb 2.0 spec, which we can apply to ANY device, even ones that don't use
* special test firmware.
*
* we know the device is configured (or suspended) by the time it's visible
* through usbfs. we can't change that, so we won't test enumeration (which
* worked 'well enough' to get here, this time), power management (ditto),
* or remote wakeup (which needs human interaction).
*/
static unsigned realworld = 1;
module_param(realworld, uint, 0);
MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
static int get_altsetting(struct usbtest_dev *dev)
{
struct usb_interface *iface = dev->intf;
struct usb_device *udev = interface_to_usbdev(iface);
int retval;
retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
0, iface->altsetting[0].desc.bInterfaceNumber,
dev->buf, 1, USB_CTRL_GET_TIMEOUT);
switch (retval) {
case 1:
return dev->buf[0];
case 0:
retval = -ERANGE;
/* FALLTHROUGH */
default:
return retval;
}
}
static int set_altsetting(struct usbtest_dev *dev, int alternate)
{
struct usb_interface *iface = dev->intf;
struct usb_device *udev;
if (alternate < 0 || alternate >= 256)
return -EINVAL;
udev = interface_to_usbdev(iface);
return usb_set_interface(udev,
iface->altsetting[0].desc.bInterfaceNumber,
alternate);
}
static int is_good_config(struct usbtest_dev *tdev, int len)
{
struct usb_config_descriptor *config;
if (len < sizeof(*config))
return 0;
config = (struct usb_config_descriptor *) tdev->buf;
switch (config->bDescriptorType) {
case USB_DT_CONFIG:
case USB_DT_OTHER_SPEED_CONFIG:
if (config->bLength != 9) {
ERROR(tdev, "bogus config descriptor length\n");
return 0;
}
/* this bit 'must be 1' but often isn't */
if (!realworld && !(config->bmAttributes & 0x80)) {
ERROR(tdev, "high bit of config attributes not set\n");
return 0;
}
if (config->bmAttributes & 0x1f) { /* reserved == 0 */
ERROR(tdev, "reserved config bits set\n");
return 0;
}
break;
default:
return 0;
}
if (le16_to_cpu(config->wTotalLength) == len) /* read it all */
return 1;
if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE) /* max partial read */
return 1;
ERROR(tdev, "bogus config descriptor read size\n");
return 0;
}
static int is_good_ext(struct usbtest_dev *tdev, u8 *buf)
{
struct usb_ext_cap_descriptor *ext;
u32 attr;
ext = (struct usb_ext_cap_descriptor *) buf;
if (ext->bLength != USB_DT_USB_EXT_CAP_SIZE) {
ERROR(tdev, "bogus usb 2.0 extension descriptor length\n");
return 0;
}
attr = le32_to_cpu(ext->bmAttributes);
/* bits[1:15] are used and the others are reserved */
if (attr & ~0xfffe) { /* reserved == 0 */
ERROR(tdev, "reserved bits set\n");
return 0;
}
return 1;
}
static int is_good_ss_cap(struct usbtest_dev *tdev, u8 *buf)
{
struct usb_ss_cap_descriptor *ss;
ss = (struct usb_ss_cap_descriptor *) buf;
if (ss->bLength != USB_DT_USB_SS_CAP_SIZE) {
ERROR(tdev, "bogus superspeed device capability descriptor length\n");
return 0;
}
/*
* only bit[1] of bmAttributes is used for LTM and others are
* reserved
*/
if (ss->bmAttributes & ~0x02) { /* reserved == 0 */
ERROR(tdev, "reserved bits set in bmAttributes\n");
return 0;
}
/* bits[0:3] of wSpeedSupported are used and the others are reserved */
if (le16_to_cpu(ss->wSpeedSupported) & ~0x0f) { /* reserved == 0 */
ERROR(tdev, "reserved bits set in wSpeedSupported\n");
return 0;
}
return 1;
}
static int is_good_con_id(struct usbtest_dev *tdev, u8 *buf)
{
struct usb_ss_container_id_descriptor *con_id;
con_id = (struct usb_ss_container_id_descriptor *) buf;
if (con_id->bLength != USB_DT_USB_SS_CONTN_ID_SIZE) {
ERROR(tdev, "bogus container id descriptor length\n");
return 0;
}
if (con_id->bReserved) { /* reserved == 0 */
ERROR(tdev, "reserved bits set\n");
return 0;
}
return 1;
}
/* sanity test for standard requests working with usb_control_msg() and some
* of the utility functions which use it.
*
* this doesn't test how endpoint halts behave or data toggles get set, since
* we won't do I/O to bulk/interrupt endpoints here (which is how to change
* halt or toggle). toggle testing is impractical without support from hcds.
*
* this avoids failing devices linux would normally work with, by not testing
* config/altsetting operations for devices that only support their defaults.
* such devices rarely support those needless operations.
*
* NOTE that since this is a sanity test, it's not examining boundary cases
* to see if usbcore, hcd, and device all behave right. such testing would
* involve varied read sizes and other operation sequences.
*/
static int ch9_postconfig(struct usbtest_dev *dev)
{
struct usb_interface *iface = dev->intf;
struct usb_device *udev = interface_to_usbdev(iface);
int i, alt, retval;
/* [9.2.3] if there's more than one altsetting, we need to be able to
* set and get each one. mostly trusts the descriptors from usbcore.
*/
for (i = 0; i < iface->num_altsetting; i++) {
/* 9.2.3 constrains the range here */
alt = iface->altsetting[i].desc.bAlternateSetting;
if (alt < 0 || alt >= iface->num_altsetting) {
dev_err(&iface->dev,
"invalid alt [%d].bAltSetting = %d\n",
i, alt);
}
/* [real world] get/set unimplemented if there's only one */
if (realworld && iface->num_altsetting == 1)
continue;
/* [9.4.10] set_interface */
retval = set_altsetting(dev, alt);
if (retval) {
dev_err(&iface->dev, "can't set_interface = %d, %d\n",
alt, retval);
return retval;
}
/* [9.4.4] get_interface always works */
retval = get_altsetting(dev);
if (retval != alt) {
dev_err(&iface->dev, "get alt should be %d, was %d\n",
alt, retval);
return (retval < 0) ? retval : -EDOM;
}
}
/* [real world] get_config unimplemented if there's only one */
if (!realworld || udev->descriptor.bNumConfigurations != 1) {
int expected = udev->actconfig->desc.bConfigurationValue;
/* [9.4.2] get_configuration always works
* ... although some cheap devices (like one TI Hub I've got)
* won't return config descriptors except before set_config.
*/
retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
USB_REQ_GET_CONFIGURATION,
USB_DIR_IN | USB_RECIP_DEVICE,
0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
if (retval != 1 || dev->buf[0] != expected) {
dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
retval, dev->buf[0], expected);
return (retval < 0) ? retval : -EDOM;
}
}
/* there's always [9.4.3] a device descriptor [9.6.1] */
retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
dev->buf, sizeof(udev->descriptor));
if (retval != sizeof(udev->descriptor)) {
dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
return (retval < 0) ? retval : -EDOM;
}
/*
* there's always [9.4.3] a bos device descriptor [9.6.2] in USB
* 3.0 spec
*/
if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0210) {
struct usb_bos_descriptor *bos = NULL;
struct usb_dev_cap_header *header = NULL;
unsigned total, num, length;
u8 *buf;
retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
sizeof(*udev->bos->desc));
if (retval != sizeof(*udev->bos->desc)) {
dev_err(&iface->dev, "bos descriptor --> %d\n", retval);
return (retval < 0) ? retval : -EDOM;
}
bos = (struct usb_bos_descriptor *)dev->buf;
total = le16_to_cpu(bos->wTotalLength);
num = bos->bNumDeviceCaps;
if (total > TBUF_SIZE)
total = TBUF_SIZE;
/*
* get generic device-level capability descriptors [9.6.2]
* in USB 3.0 spec
*/
retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
total);
if (retval != total) {
dev_err(&iface->dev, "bos descriptor set --> %d\n",
retval);
return (retval < 0) ? retval : -EDOM;
}
length = sizeof(*udev->bos->desc);
buf = dev->buf;
for (i = 0; i < num; i++) {
buf += length;
if (buf + sizeof(struct usb_dev_cap_header) >
dev->buf + total)
break;
header = (struct usb_dev_cap_header *)buf;
length = header->bLength;
if (header->bDescriptorType !=
USB_DT_DEVICE_CAPABILITY) {
dev_warn(&udev->dev, "not device capability descriptor, skip\n");
continue;
}
switch (header->bDevCapabilityType) {
case USB_CAP_TYPE_EXT:
if (buf + USB_DT_USB_EXT_CAP_SIZE >
dev->buf + total ||
!is_good_ext(dev, buf)) {
dev_err(&iface->dev, "bogus usb 2.0 extension descriptor\n");
return -EDOM;
}
break;
case USB_SS_CAP_TYPE:
if (buf + USB_DT_USB_SS_CAP_SIZE >
dev->buf + total ||
!is_good_ss_cap(dev, buf)) {
dev_err(&iface->dev, "bogus superspeed device capability descriptor\n");
return -EDOM;
}
break;
case CONTAINER_ID_TYPE:
if (buf + USB_DT_USB_SS_CONTN_ID_SIZE >
dev->buf + total ||
!is_good_con_id(dev, buf)) {
dev_err(&iface->dev, "bogus container id descriptor\n");
return -EDOM;
}
break;
default:
break;
}
}
}
/* there's always [9.4.3] at least one config descriptor [9.6.3] */
for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
dev->buf, TBUF_SIZE);
if (!is_good_config(dev, retval)) {
dev_err(&iface->dev,
"config [%d] descriptor --> %d\n",
i, retval);
return (retval < 0) ? retval : -EDOM;
}
/* FIXME cross-checking udev->config[i] to make sure usbcore
* parsed it right (etc) would be good testing paranoia
*/
}
/* and sometimes [9.2.6.6] speed dependent descriptors */
if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
struct usb_qualifier_descriptor *d = NULL;
/* device qualifier [9.6.2] */
retval = usb_get_descriptor(udev,
USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
sizeof(struct usb_qualifier_descriptor));
if (retval == -EPIPE) {
if (udev->speed == USB_SPEED_HIGH) {
dev_err(&iface->dev,
"hs dev qualifier --> %d\n",
retval);
return (retval < 0) ? retval : -EDOM;
}
/* usb2.0 but not high-speed capable; fine */
} else if (retval != sizeof(struct usb_qualifier_descriptor)) {
dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
return (retval < 0) ? retval : -EDOM;
} else
d = (struct usb_qualifier_descriptor *) dev->buf;
/* might not have [9.6.2] any other-speed configs [9.6.4] */
if (d) {
unsigned max = d->bNumConfigurations;
for (i = 0; i < max; i++) {
retval = usb_get_descriptor(udev,
USB_DT_OTHER_SPEED_CONFIG, i,
dev->buf, TBUF_SIZE);
if (!is_good_config(dev, retval)) {
dev_err(&iface->dev,
"other speed config --> %d\n",
retval);
return (retval < 0) ? retval : -EDOM;
}
}
}
}
/* FIXME fetch strings from at least the device descriptor */
/* [9.4.5] get_status always works */
retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
if (retval) {
dev_err(&iface->dev, "get dev status --> %d\n", retval);
return retval;
}
/* FIXME configuration.bmAttributes says if we could try to set/clear
* the device's remote wakeup feature ... if we can, test that here
*/
retval = usb_get_status(udev, USB_RECIP_INTERFACE,
iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
if (retval) {
dev_err(&iface->dev, "get interface status --> %d\n", retval);
return retval;
}
/* FIXME get status for each endpoint in the interface */
return 0;
}
/*-------------------------------------------------------------------------*/
/* use ch9 requests to test whether:
* (a) queues work for control, keeping N subtests queued and
* active (auto-resubmit) for M loops through the queue.
* (b) protocol stalls (control-only) will autorecover.
* it's not like bulk/intr; no halt clearing.
* (c) short control reads are reported and handled.
* (d) queues are always processed in-order
*/
struct ctrl_ctx {
spinlock_t lock;
struct usbtest_dev *dev;
struct completion complete;
unsigned count;
unsigned pending;
int status;
struct urb **urb;
struct usbtest_param *param;
int last;
};
#define NUM_SUBCASES 16 /* how many test subcases here? */
struct subcase {
struct usb_ctrlrequest setup;
int number;
int expected;
};
static void ctrl_complete(struct urb *urb)
{
struct ctrl_ctx *ctx = urb->context;
struct usb_ctrlrequest *reqp;
struct subcase *subcase;
int status = urb->status;
reqp = (struct usb_ctrlrequest *)urb->setup_packet;
subcase = container_of(reqp, struct subcase, setup);
spin_lock(&ctx->lock);
ctx->count--;
ctx->pending--;
/* queue must transfer and complete in fifo order, unless
* usb_unlink_urb() is used to unlink something not at the
* physical queue head (not tested).
*/
if (subcase->number > 0) {
if ((subcase->number - ctx->last) != 1) {
ERROR(ctx->dev,
"subcase %d completed out of order, last %d\n",
subcase->number, ctx->last);
status = -EDOM;
ctx->last = subcase->number;
goto error;
}
}
ctx->last = subcase->number;
/* succeed or fault in only one way? */
if (status == subcase->expected)
status = 0;
/* async unlink for cleanup? */
else if (status != -ECONNRESET) {
/* some faults are allowed, not required */
if (subcase->expected > 0 && (
((status == -subcase->expected /* happened */
|| status == 0)))) /* didn't */
status = 0;
/* sometimes more than one fault is allowed */
else if (subcase->number == 12 && status == -EPIPE)
status = 0;
else
ERROR(ctx->dev, "subtest %d error, status %d\n",
subcase->number, status);
}
/* unexpected status codes mean errors; ideally, in hardware */
if (status) {
error:
if (ctx->status == 0) {
int i;
ctx->status = status;
ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
"%d left, subcase %d, len %d/%d\n",
reqp->bRequestType, reqp->bRequest,
status, ctx->count, subcase->number,
urb->actual_length,
urb->transfer_buffer_length);
/* FIXME this "unlink everything" exit route should
* be a separate test case.
*/
/* unlink whatever's still pending */
for (i = 1; i < ctx->param->sglen; i++) {
struct urb *u = ctx->urb[
(i + subcase->number)
% ctx->param->sglen];
if (u == urb || !u->dev)
continue;
spin_unlock(&ctx->lock);
status = usb_unlink_urb(u);
spin_lock(&ctx->lock);
switch (status) {
case -EINPROGRESS:
case -EBUSY:
case -EIDRM:
continue;
default:
ERROR(ctx->dev, "urb unlink --> %d\n",
status);
}
}
status = ctx->status;
}
}
/* resubmit if we need to, else mark this as done */
if ((status == 0) && (ctx->pending < ctx->count)) {
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status != 0) {
ERROR(ctx->dev,
"can't resubmit ctrl %02x.%02x, err %d\n",
reqp->bRequestType, reqp->bRequest, status);
urb->dev = NULL;
} else
ctx->pending++;
} else
urb->dev = NULL;
/* signal completion when nothing's queued */
if (ctx->pending == 0)
complete(&ctx->complete);
spin_unlock(&ctx->lock);
}
static int
test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
{
struct usb_device *udev = testdev_to_usbdev(dev);
struct urb **urb;
struct ctrl_ctx context;
int i;
if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
return -EOPNOTSUPP;
spin_lock_init(&context.lock);
context.dev = dev;
init_completion(&context.complete);
context.count = param->sglen * param->iterations;
context.pending = 0;
context.status = -ENOMEM;
context.param = param;
context.last = -1;
/* allocate and init the urbs we'll queue.
* as with bulk/intr sglists, sglen is the queue depth; it also
* controls which subtests run (more tests than sglen) or rerun.
*/
urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
if (!urb)
return -ENOMEM;
for (i = 0; i < param->sglen; i++) {
int pipe = usb_rcvctrlpipe(udev, 0);
unsigned len;
struct urb *u;
struct usb_ctrlrequest req;
struct subcase *reqp;
/* sign of this variable means:
* -: tested code must return this (negative) error code
* +: tested code may return this (negative too) error code
*/
int expected = 0;
/* requests here are mostly expected to succeed on any
* device, but some are chosen to trigger protocol stalls
* or short reads.
*/
memset(&req, 0, sizeof(req));
req.bRequest = USB_REQ_GET_DESCRIPTOR;
req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
switch (i % NUM_SUBCASES) {
case 0: /* get device descriptor */
req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
len = sizeof(struct usb_device_descriptor);
break;
case 1: /* get first config descriptor (only) */
req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
len = sizeof(struct usb_config_descriptor);
break;
case 2: /* get altsetting (OFTEN STALLS) */
req.bRequest = USB_REQ_GET_INTERFACE;
req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
/* index = 0 means first interface */
len = 1;
expected = EPIPE;
break;
case 3: /* get interface status */
req.bRequest = USB_REQ_GET_STATUS;
req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
/* interface 0 */
len = 2;
break;
case 4: /* get device status */
req.bRequest = USB_REQ_GET_STATUS;
req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
len = 2;
break;
case 5: /* get device qualifier (MAY STALL) */
req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
len = sizeof(struct usb_qualifier_descriptor);
if (udev->speed != USB_SPEED_HIGH)
expected = EPIPE;
break;
case 6: /* get first config descriptor, plus interface */
req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
len = sizeof(struct usb_config_descriptor);
len += sizeof(struct usb_interface_descriptor);
break;
case 7: /* get interface descriptor (ALWAYS STALLS) */
req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
/* interface == 0 */
len = sizeof(struct usb_interface_descriptor);
expected = -EPIPE;
break;
/* NOTE: two consecutive stalls in the queue here.
* that tests fault recovery a bit more aggressively. */
case 8: /* clear endpoint halt (MAY STALL) */
req.bRequest = USB_REQ_CLEAR_FEATURE;
req.bRequestType = USB_RECIP_ENDPOINT;
/* wValue 0 == ep halt */
/* wIndex 0 == ep0 (shouldn't halt!) */
len = 0;
pipe = usb_sndctrlpipe(udev, 0);
expected = EPIPE;
break;
case 9: /* get endpoint status */
req.bRequest = USB_REQ_GET_STATUS;
req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
/* endpoint 0 */
len = 2;
break;
case 10: /* trigger short read (EREMOTEIO) */
req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
len = 1024;
expected = -EREMOTEIO;
break;
/* NOTE: two consecutive _different_ faults in the queue. */
case 11: /* get endpoint descriptor (ALWAYS STALLS) */
req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
/* endpoint == 0 */
len = sizeof(struct usb_interface_descriptor);
expected = EPIPE;
break;
/* NOTE: sometimes even a third fault in the queue! */
case 12: /* get string 0 descriptor (MAY STALL) */
req.wValue = cpu_to_le16(USB_DT_STRING << 8);
/* string == 0, for language IDs */
len = sizeof(struct usb_interface_descriptor);
/* may succeed when > 4 languages */
expected = EREMOTEIO; /* or EPIPE, if no strings */
break;
case 13: /* short read, resembling case 10 */
req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
/* last data packet "should" be DATA1, not DATA0 */
if (udev->speed == USB_SPEED_SUPER)
len = 1024 - 512;
else
len = 1024 - udev->descriptor.bMaxPacketSize0;
expected = -EREMOTEIO;
break;
case 14: /* short read; try to fill the last packet */
req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
/* device descriptor size == 18 bytes */
len = udev->descriptor.bMaxPacketSize0;
if (udev->speed == USB_SPEED_SUPER)
len = 512;
switch (len) {
case 8:
len = 24;
break;
case 16:
len = 32;
break;
}
expected = -EREMOTEIO;
break;
case 15:
req.wValue = cpu_to_le16(USB_DT_BOS << 8);
if (udev->bos)
len = le16_to_cpu(udev->bos->desc->wTotalLength);
else
len = sizeof(struct usb_bos_descriptor);
if (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0201)
expected = -EPIPE;
break;
default:
ERROR(dev, "bogus number of ctrl queue testcases!\n");
context.status = -EINVAL;
goto cleanup;
}
req.wLength = cpu_to_le16(len);
urb[i] = u = simple_alloc_urb(udev, pipe, len, 0);
if (!u)
goto cleanup;
reqp = kmalloc(sizeof(*reqp), GFP_KERNEL);
if (!reqp)
goto cleanup;
reqp->setup = req;
reqp->number = i % NUM_SUBCASES;
reqp->expected = expected;
u->setup_packet = (char *) &reqp->setup;
u->context = &context;
u->complete = ctrl_complete;
}
/* queue the urbs */
context.urb = urb;
spin_lock_irq(&context.lock);
for (i = 0; i < param->sglen; i++) {
context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
if (context.status != 0) {
ERROR(dev, "can't submit urb[%d], status %d\n",
i, context.status);
context.count = context.pending;
break;
}
context.pending++;
}
spin_unlock_irq(&context.lock);
/* FIXME set timer and time out; provide a disconnect hook */
/* wait for the last one to complete */
if (context.pending > 0)
wait_for_completion(&context.complete);
cleanup:
for (i = 0; i < param->sglen; i++) {
if (!urb[i])
continue;
urb[i]->dev = udev;
kfree(urb[i]->setup_packet);
simple_free_urb(urb[i]);
}
kfree(urb);
return context.status;
}
#undef NUM_SUBCASES
/*-------------------------------------------------------------------------*/
static void unlink1_callback(struct urb *urb)
{
int status = urb->status;
/* we "know" -EPIPE (stall) never happens */
if (!status)
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
urb->status = status;
complete(urb->context);
}
}
static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
{
struct urb *urb;
struct completion completion;
int retval = 0;
init_completion(&completion);
urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size, 0);
if (!urb)
return -ENOMEM;
urb->context = &completion;
urb->complete = unlink1_callback;
if (usb_pipeout(urb->pipe)) {
simple_fill_buf(urb);
urb->transfer_flags |= URB_ZERO_PACKET;
}
/* keep the endpoint busy. there are lots of hc/hcd-internal
* states, and testing should get to all of them over time.
*
* FIXME want additional tests for when endpoint is STALLing
* due to errors, or is just NAKing requests.
*/
retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval != 0) {
dev_err(&dev->intf->dev, "submit fail %d\n", retval);
return retval;
}
/* unlinking that should always work. variable delay tests more
* hcd states and code paths, even with little other system load.
*/
msleep(jiffies % (2 * INTERRUPT_RATE));
if (async) {
while (!completion_done(&completion)) {
retval = usb_unlink_urb(urb);
if (retval == 0 && usb_pipein(urb->pipe))
retval = simple_check_buf(dev, urb);
switch (retval) {
case -EBUSY:
case -EIDRM:
/* we can't unlink urbs while they're completing
* or if they've completed, and we haven't
* resubmitted. "normal" drivers would prevent
* resubmission, but since we're testing unlink
* paths, we can't.
*/
ERROR(dev, "unlink retry\n");
continue;
case 0:
case -EINPROGRESS:
break;
default:
dev_err(&dev->intf->dev,
"unlink fail %d\n", retval);
return retval;
}
break;
}
} else
usb_kill_urb(urb);
wait_for_completion(&completion);
retval = urb->status;
simple_free_urb(urb);
if (async)
return (retval == -ECONNRESET) ? 0 : retval - 1000;
else
return (retval == -ENOENT || retval == -EPERM) ?
0 : retval - 2000;
}
static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
{
int retval = 0;
/* test sync and async paths */
retval = unlink1(dev, pipe, len, 1);
if (!retval)
retval = unlink1(dev, pipe, len, 0);
return retval;
}
/*-------------------------------------------------------------------------*/
struct queued_ctx {
struct completion complete;
atomic_t pending;
unsigned num;
int status;
struct urb **urbs;
};
static void unlink_queued_callback(struct urb *urb)
{
int status = urb->status;
struct queued_ctx *ctx = urb->context;
if (ctx->status)
goto done;
if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
if (status == -ECONNRESET)
goto done;
/* What error should we report if the URB completed normally? */
}
if (status != 0)
ctx->status = status;
done:
if (atomic_dec_and_test(&ctx->pending))
complete(&ctx->complete);
}
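/*
 * Queue 'num' identical bulk urbs sharing one coherent buffer, then unlink
 * urbs[num - 4] and urbs[num - 2] while the rest may still be in flight;
 * the callback above accepts -ECONNRESET on those two as success.
 */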
static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
unsigned size)
{
struct queued_ctx ctx;
struct usb_device *udev = testdev_to_usbdev(dev);
void *buf;
dma_addr_t buf_dma;
int i;
int retval = -ENOMEM;
init_completion(&ctx.complete);
atomic_set(&ctx.pending, 1); /* One more than the actual value */
ctx.num = num;
ctx.status = 0;
buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
if (!buf)
return retval;
memset(buf, 0, size);
/* Allocate and init the urbs we'll queue */
ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
if (!ctx.urbs)
goto free_buf;
for (i = 0; i < num; i++) {
ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
if (!ctx.urbs[i])
goto free_urbs;
usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
unlink_queued_callback, &ctx);
ctx.urbs[i]->transfer_dma = buf_dma;
ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
if (usb_pipeout(ctx.urbs[i]->pipe)) {
simple_fill_buf(ctx.urbs[i]);
ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET;
}
}
/* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
for (i = 0; i < num; i++) {
atomic_inc(&ctx.pending);
retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
if (retval != 0) {
dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
i, retval);
atomic_dec(&ctx.pending);
ctx.status = retval;
break;
}
}
if (i == num) {
usb_unlink_urb(ctx.urbs[num - 4]);
usb_unlink_urb(ctx.urbs[num - 2]);
} else {
while (--i >= 0)
usb_unlink_urb(ctx.urbs[i]);
}
if (atomic_dec_and_test(&ctx.pending)) /* The extra count */
complete(&ctx.complete);
wait_for_completion(&ctx.complete);
retval = ctx.status;
free_urbs:
for (i = 0; i < num; i++)
usb_free_urb(ctx.urbs[i]);
kfree(ctx.urbs);
free_buf:
usb_free_coherent(udev, size, buf, buf_dma);
return retval;
}
/*-------------------------------------------------------------------------*/
static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
int retval;
u16 status;
/* shouldn't look or act halted */
retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
if (retval < 0) {
ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
ep, retval);
return retval;
}
if (status != 0) {
ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
return -EINVAL;
}
retval = simple_io(tdev, urb, 1, 0, 0, __func__);
if (retval != 0)
return -EINVAL;
return 0;
}
static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
int retval;
u16 status;
/* should look and act halted */
retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
if (retval < 0) {
ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
ep, retval);
return retval;
}
if (status != 1) {
ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
return -EINVAL;
}
retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
if (retval != -EPIPE)
return -EINVAL;
retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
if (retval != -EPIPE)
return -EINVAL;
return 0;
}
static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
int retval;
/* shouldn't look or act halted now */
retval = verify_not_halted(tdev, ep, urb);
if (retval < 0)
return retval;
/* set halt (protocol test only), verify it worked */
retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
USB_ENDPOINT_HALT, ep,
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (retval < 0) {
ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
return retval;
}
retval = verify_halted(tdev, ep, urb);
if (retval < 0) {
int ret;
/* clear halt anyways, else further tests will fail */
ret = usb_clear_halt(urb->dev, urb->pipe);
if (ret)
ERROR(tdev, "ep %02x couldn't clear halt, %d\n",
ep, ret);
return retval;
}
/* clear halt (tests API + protocol), verify it worked */
retval = usb_clear_halt(urb->dev, urb->pipe);
if (retval < 0) {
ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
return retval;
}
retval = verify_not_halted(tdev, ep, urb);
if (retval < 0)
return retval;
/* NOTE: could also verify SET_INTERFACE clear halts ... */
return 0;
}
static int halt_simple(struct usbtest_dev *dev)
{
int ep;
int retval = 0;
struct urb *urb;
struct usb_device *udev = testdev_to_usbdev(dev);
if (udev->speed == USB_SPEED_SUPER)
urb = simple_alloc_urb(udev, 0, 1024, 0);
else
urb = simple_alloc_urb(udev, 0, 512, 0);
if (urb == NULL)
return -ENOMEM;
if (dev->in_pipe) {
ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
urb->pipe = dev->in_pipe;
retval = test_halt(dev, ep, urb);
if (retval < 0)
goto done;
}
if (dev->out_pipe) {
ep = usb_pipeendpoint(dev->out_pipe);
urb->pipe = dev->out_pipe;
retval = test_halt(dev, ep, urb);
}
done:
simple_free_urb(urb);
return retval;
}
/*-------------------------------------------------------------------------*/
/* Control OUT tests use the vendor control requests from Intel's
* USB 2.0 compliance test device: write a buffer, read it back.
*
* Intel's spec only _requires_ that it work for one packet, which
* is pretty weak. Some HCDs place limits here; most devices will
* need to be able to handle more than one OUT data packet. We'll
* try whatever we're told to try.
*/
static int ctrl_out(struct usbtest_dev *dev,
unsigned count, unsigned length, unsigned vary, unsigned offset)
{
unsigned i, j, len;
int retval;
u8 *buf;
char *what = "?";
struct usb_device *udev;
if (length < 1 || length > 0xffff || vary >= length)
return -EINVAL;
buf = kmalloc(length + offset, GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf += offset;
udev = testdev_to_usbdev(dev);
len = length;
retval = 0;
/* NOTE: hardware might well act differently if we pushed it
* with lots back-to-back queued requests.
*/
for (i = 0; i < count; i++) {
/* write patterned data */
for (j = 0; j < len; j++)
buf[j] = i + j;
retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
if (retval != len) {
what = "write";
if (retval >= 0) {
ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
retval, len);
retval = -EBADMSG;
}
break;
}
/* read it back -- assuming nothing intervened!! */
retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
if (retval != len) {
what = "read";
if (retval >= 0) {
ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
retval, len);
retval = -EBADMSG;
}
break;
}
/* fail if we can't verify */
for (j = 0; j < len; j++) {
if (buf[j] != (u8) (i + j)) {
ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
j, buf[j], (u8) i + j);
retval = -EBADMSG;
break;
}
}
if (retval < 0) {
what = "verify";
break;
}
len += vary;
/* [real world] the "zero bytes IN" case isn't really used.
* hardware can easily trip up in this weird case, since its
* status stage is IN, not OUT like other ep0in transfers.
*/
if (len > length)
len = realworld ? 1 : 0;
}
if (retval < 0)
ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
what, retval, i);
kfree(buf - offset);
return retval;
}
/*-------------------------------------------------------------------------*/
/* ISO tests ... mimics common usage
* - buffer length is split into N packets (mostly maxpacket sized)
* - multi-buffers according to sglen
*/
struct iso_context {
unsigned count;
unsigned pending;
spinlock_t lock;
struct completion done;
int submit_error;
unsigned long errors;
unsigned long packet_count;
struct usbtest_dev *dev;
};
static void iso_callback(struct urb *urb)
{
struct iso_context *ctx = urb->context;
spin_lock(&ctx->lock);
ctx->count--;
ctx->packet_count += urb->number_of_packets;
if (urb->error_count > 0)
ctx->errors += urb->error_count;
else if (urb->status != 0)
ctx->errors += urb->number_of_packets;
else if (urb->actual_length != urb->transfer_buffer_length)
ctx->errors++;
else if (check_guard_bytes(ctx->dev, urb) != 0)
ctx->errors++;
if (urb->status == 0 && ctx->count > (ctx->pending - 1)
&& !ctx->submit_error) {
int status = usb_submit_urb(urb, GFP_ATOMIC);
switch (status) {
case 0:
goto done;
default:
dev_err(&ctx->dev->intf->dev,
"iso resubmit err %d\n",
status);
/* FALLTHROUGH */
case -ENODEV: /* disconnected */
case -ESHUTDOWN: /* endpoint disabled */
ctx->submit_error = 1;
break;
}
}
ctx->pending--;
if (ctx->pending == 0) {
if (ctx->errors)
dev_err(&ctx->dev->intf->dev,
"iso test, %lu errors out of %lu\n",
ctx->errors, ctx->packet_count);
complete(&ctx->done);
}
done:
spin_unlock(&ctx->lock);
}
static struct urb *iso_alloc_urb(
struct usb_device *udev,
int pipe,
struct usb_endpoint_descriptor *desc,
long bytes,
unsigned offset
)
{
struct urb *urb;
unsigned i, maxp, packets;
if (bytes < 0 || !desc)
return NULL;
maxp = 0x7ff & usb_endpoint_maxp(desc);
maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
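/* e.g. a high bandwidth endpoint advertising wMaxPacketSize 0x1400
 * decodes to 1024 bytes/transaction * (1 + 2) transactions/microframe,
 * so maxp = 3072 and an 8192-byte request becomes 3 packets */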
packets = DIV_ROUND_UP(bytes, maxp);
urb = usb_alloc_urb(packets, GFP_KERNEL);
if (!urb)
return urb;
urb->dev = udev;
urb->pipe = pipe;
urb->number_of_packets = packets;
urb->transfer_buffer_length = bytes;
urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
GFP_KERNEL,
&urb->transfer_dma);
if (!urb->transfer_buffer) {
usb_free_urb(urb);
return NULL;
}
if (offset) {
memset(urb->transfer_buffer, GUARD_BYTE, offset);
urb->transfer_buffer += offset;
urb->transfer_dma += offset;
}
/* For inbound transfers use guard byte so that test fails if
data not correctly copied */
memset(urb->transfer_buffer,
usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
bytes);
for (i = 0; i < packets; i++) {
/* here, only the last packet will be short */
urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
bytes -= urb->iso_frame_desc[i].length;
urb->iso_frame_desc[i].offset = maxp * i;
}
urb->complete = iso_callback;
/* urb->context = SET BY CALLER */
urb->interval = 1 << (desc->bInterval - 1);
urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
return urb;
}
static int
test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
{
struct iso_context context;
struct usb_device *udev;
unsigned i;
unsigned long packets = 0;
int status = 0;
struct urb *urbs[10]; /* FIXME no limit */
if (param->sglen > 10)
return -EDOM;
memset(&context, 0, sizeof(context));
context.count = param->iterations * param->sglen;
context.dev = dev;
init_completion(&context.done);
spin_lock_init(&context.lock);
memset(urbs, 0, sizeof(urbs));
udev = testdev_to_usbdev(dev);
dev_info(&dev->intf->dev,
"... iso period %d %sframes, wMaxPacket %04x\n",
1 << (desc->bInterval - 1),
(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
usb_endpoint_maxp(desc));
for (i = 0; i < param->sglen; i++) {
urbs[i] = iso_alloc_urb(udev, pipe, desc,
param->length, offset);
if (!urbs[i]) {
status = -ENOMEM;
goto fail;
}
packets += urbs[i]->number_of_packets;
urbs[i]->context = &context;
}
packets *= param->iterations;
dev_info(&dev->intf->dev,
"... total %lu msec (%lu packets)\n",
(packets * (1 << (desc->bInterval - 1)))
/ ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
packets);
spin_lock_irq(&context.lock);
for (i = 0; i < param->sglen; i++) {
++context.pending;
status = usb_submit_urb(urbs[i], GFP_ATOMIC);
if (status < 0) {
ERROR(dev, "submit iso[%d], error %d\n", i, status);
if (i == 0) {
spin_unlock_irq(&context.lock);
goto fail;
}
simple_free_urb(urbs[i]);
urbs[i] = NULL;
context.pending--;
context.submit_error = 1;
break;
}
}
spin_unlock_irq(&context.lock);
wait_for_completion(&context.done);
for (i = 0; i < param->sglen; i++) {
if (urbs[i])
simple_free_urb(urbs[i]);
}
/*
* Isochronous transfers are expected to fail sometimes. As an
* arbitrary limit, we will report an error if any submissions
* fail or if the transfer failure rate is > 10%.
*/
if (status != 0)
;
else if (context.submit_error)
status = -EACCES;
else if (context.errors > context.packet_count / 10)
status = -EIO;
return status;
fail:
for (i = 0; i < param->sglen; i++) {
if (urbs[i])
simple_free_urb(urbs[i]);
}
return status;
}
static int test_unaligned_bulk(
struct usbtest_dev *tdev,
int pipe,
unsigned length,
int iterations,
unsigned transfer_flags,
const char *label)
{
int retval;
struct urb *urb = usbtest_alloc_urb(
testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1, 0);
if (!urb)
return -ENOMEM;
retval = simple_io(tdev, urb, iterations, 0, 0, label);
simple_free_urb(urb);
return retval;
}
/*-------------------------------------------------------------------------*/
/* We only have this one interface to user space, through usbfs.
* User mode code can scan usbfs to find N different devices (maybe on
* different busses) to use when testing, and allocate one thread per
* test. So discovery is simplified, and we have no device naming issues.
*
* Don't use these only as stress/load tests. Use them along with
* other USB bus activity: plugging, unplugging, mousing, mp3 playback,
* video capture, and so on. Run different tests at different times, in
* different sequences. Nothing here should interact with other devices,
* except indirectly by consuming USB bandwidth and CPU resources for test
* threads and request completion. But the only way to know that for sure
* is to test when HC queues are in use by many devices.
*
* WARNING: Because usbfs grabs udev->dev.sem before calling this ioctl(),
* it locks out usbcore in certain code paths. Notably, if you disconnect
* the device-under-test, hub_wq will block forever waiting for the
* ioctl to complete ... so that usb_disconnect() can abort the pending
* urbs and then call usbtest_disconnect(). To abort a test, you're best
* off just killing the userspace task and waiting for it to exit.
*/
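/*
 * Userspace sketch (not compiled here; an illustration, not part of this
 * driver): a test request normally reaches usbtest_ioctl() through the
 * USBDEVFS_IOCTL wrapper on an interface already bound to this driver,
 * much as the in-tree testusb tool does.  Userspace needs its own copy
 * of struct usbtest_param and USBTEST_REQUEST; the device path (e.g.
 * /dev/bus/usb/001/004), interface number and parameter values below
 * are illustrative assumptions.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/usbdevice_fs.h>
 *
 *	static int run_usbtest(const char *devpath, int ifnum, unsigned test_num)
 *	{
 *		struct usbtest_param param = {
 *			.test_num   = test_num,
 *			.iterations = 1000,
 *			.length     = 1024,
 *			.vary       = 1024,
 *			.sglen      = 32,
 *		};
 *		struct usbdevfs_ioctl wrapper = {
 *			.ifno       = ifnum,
 *			.ioctl_code = USBTEST_REQUEST,
 *			.data       = &param,
 *		};
 *		int fd = open(devpath, O_RDWR);
 *		int ret;
 *
 *		if (fd < 0)
 *			return -1;
 *		ret = ioctl(fd, USBDEVFS_IOCTL, &wrapper);
 *		close(fd);
 *		return ret;
 *	}
 */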
static int
usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
{
struct usbtest_dev *dev = usb_get_intfdata(intf);
struct usb_device *udev = testdev_to_usbdev(dev);
struct usbtest_param *param = buf;
int retval = -EOPNOTSUPP;
struct urb *urb;
struct scatterlist *sg;
struct usb_sg_request req;
struct timeval start;
unsigned i;
/* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */
pattern = mod_pattern;
if (code != USBTEST_REQUEST)
return -EOPNOTSUPP;
if (param->iterations <= 0)
return -EINVAL;
if (mutex_lock_interruptible(&dev->lock))
return -ERESTARTSYS;
/* FIXME: What if a system sleep starts while a test is running? */
/* some devices, like ez-usb default devices, need a non-default
* altsetting to have any active endpoints. some tests change
* altsettings; force a default so most tests don't need to check.
*/
if (dev->info->alt >= 0) {
int res;
if (intf->altsetting->desc.bInterfaceNumber) {
mutex_unlock(&dev->lock);
return -ENODEV;
}
res = set_altsetting(dev, dev->info->alt);
if (res) {
dev_err(&intf->dev,
"set altsetting to %d failed, %d\n",
dev->info->alt, res);
mutex_unlock(&dev->lock);
return res;
}
}
/*
* Just a bunch of test cases that every HCD is expected to handle.
*
* Some may need specific firmware, though it'd be good to have
* one firmware image to handle all the test cases.
*
* FIXME add more tests! cancel requests, verify the data, control
* queueing, concurrent read+write threads, and so on.
*/
do_gettimeofday(&start);
switch (param->test_num) {
case 0:
dev_info(&intf->dev, "TEST 0: NOP\n");
retval = 0;
break;
/* Simple non-queued bulk I/O tests */
case 1:
if (dev->out_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 1: write %d bytes %u times\n",
param->length, param->iterations);
urb = simple_alloc_urb(udev, dev->out_pipe, param->length, 0);
if (!urb) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk sink (maybe accepts short writes) */
retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
simple_free_urb(urb);
break;
case 2:
if (dev->in_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 2: read %d bytes %u times\n",
param->length, param->iterations);
urb = simple_alloc_urb(udev, dev->in_pipe, param->length, 0);
if (!urb) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk source (maybe generates short writes) */
retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
simple_free_urb(urb);
break;
case 3:
if (dev->out_pipe == 0 || param->vary == 0)
break;
dev_info(&intf->dev,
"TEST 3: write/%d 0..%d bytes %u times\n",
param->vary, param->length, param->iterations);
urb = simple_alloc_urb(udev, dev->out_pipe, param->length, 0);
if (!urb) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk sink (maybe accepts short writes) */
retval = simple_io(dev, urb, param->iterations, param->vary,
0, "test3");
simple_free_urb(urb);
break;
case 4:
if (dev->in_pipe == 0 || param->vary == 0)
break;
dev_info(&intf->dev,
"TEST 4: read/%d 0..%d bytes %u times\n",
param->vary, param->length, param->iterations);
urb = simple_alloc_urb(udev, dev->in_pipe, param->length, 0);
if (!urb) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk source (maybe generates short writes) */
retval = simple_io(dev, urb, param->iterations, param->vary,
0, "test4");
simple_free_urb(urb);
break;
/* Queued bulk I/O tests */
case 5:
if (dev->out_pipe == 0 || param->sglen == 0)
break;
dev_info(&intf->dev,
"TEST 5: write %d sglists %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
sg = alloc_sglist(param->sglen, param->length, 0);
if (!sg) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk sink (maybe accepts short writes) */
retval = perform_sglist(dev, param->iterations, dev->out_pipe,
&req, sg, param->sglen);
free_sglist(sg, param->sglen);
break;
case 6:
if (dev->in_pipe == 0 || param->sglen == 0)
break;
dev_info(&intf->dev,
"TEST 6: read %d sglists %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
sg = alloc_sglist(param->sglen, param->length, 0);
if (!sg) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk source (maybe generates short writes) */
retval = perform_sglist(dev, param->iterations, dev->in_pipe,
&req, sg, param->sglen);
free_sglist(sg, param->sglen);
break;
case 7:
if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
break;
dev_info(&intf->dev,
"TEST 7: write/%d %d sglists %d entries 0..%d bytes\n",
param->vary, param->iterations,
param->sglen, param->length);
sg = alloc_sglist(param->sglen, param->length, param->vary);
if (!sg) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk sink (maybe accepts short writes) */
retval = perform_sglist(dev, param->iterations, dev->out_pipe,
&req, sg, param->sglen);
free_sglist(sg, param->sglen);
break;
case 8:
if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
break;
dev_info(&intf->dev,
"TEST 8: read/%d %d sglists %d entries 0..%d bytes\n",
param->vary, param->iterations,
param->sglen, param->length);
sg = alloc_sglist(param->sglen, param->length, param->vary);
if (!sg) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk source (maybe generates short writes) */
retval = perform_sglist(dev, param->iterations, dev->in_pipe,
&req, sg, param->sglen);
free_sglist(sg, param->sglen);
break;
/* non-queued sanity tests for control (chapter 9 subset) */
case 9:
retval = 0;
dev_info(&intf->dev,
"TEST 9: ch9 (subset) control tests, %d times\n",
param->iterations);
for (i = param->iterations; retval == 0 && i--; /* NOP */)
retval = ch9_postconfig(dev);
if (retval)
dev_err(&intf->dev, "ch9 subset failed, "
"iterations left %d\n", i);
break;
/* queued control messaging */
case 10:
retval = 0;
dev_info(&intf->dev,
"TEST 10: queue %d control calls, %d times\n",
param->sglen,
param->iterations);
retval = test_ctrl_queue(dev, param);
break;
/* simple non-queued unlinks (ring with one urb) */
case 11:
if (dev->in_pipe == 0 || !param->length)
break;
retval = 0;
dev_info(&intf->dev, "TEST 11: unlink %d reads of %d\n",
param->iterations, param->length);
for (i = param->iterations; retval == 0 && i--; /* NOP */)
retval = unlink_simple(dev, dev->in_pipe,
param->length);
if (retval)
dev_err(&intf->dev, "unlink reads failed %d, "
"iterations left %d\n", retval, i);
break;
case 12:
if (dev->out_pipe == 0 || !param->length)
break;
retval = 0;
dev_info(&intf->dev, "TEST 12: unlink %d writes of %d\n",
param->iterations, param->length);
for (i = param->iterations; retval == 0 && i--; /* NOP */)
retval = unlink_simple(dev, dev->out_pipe,
param->length);
if (retval)
dev_err(&intf->dev, "unlink writes failed %d, "
"iterations left %d\n", retval, i);
break;
/* ep halt tests */
case 13:
if (dev->out_pipe == 0 && dev->in_pipe == 0)
break;
retval = 0;
dev_info(&intf->dev, "TEST 13: set/clear %d halts\n",
param->iterations);
for (i = param->iterations; retval == 0 && i--; /* NOP */)
retval = halt_simple(dev);
if (retval)
ERROR(dev, "halts failed, iterations left %d\n", i);
break;
/* control write tests */
case 14:
if (!dev->info->ctrl_out)
break;
dev_info(&intf->dev, "TEST 14: %d ep0out, %d..%d vary %d\n",
param->iterations,
realworld ? 1 : 0, param->length,
param->vary);
retval = ctrl_out(dev, param->iterations,
param->length, param->vary, 0);
break;
/* iso write tests */
case 15:
if (dev->out_iso_pipe == 0 || param->sglen == 0)
break;
dev_info(&intf->dev,
"TEST 15: write %d iso, %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
/* FIRMWARE: iso sink */
retval = test_iso_queue(dev, param,
dev->out_iso_pipe, dev->iso_out, 0);
break;
/* iso read tests */
case 16:
if (dev->in_iso_pipe == 0 || param->sglen == 0)
break;
dev_info(&intf->dev,
"TEST 16: read %d iso, %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
/* FIRMWARE: iso source */
retval = test_iso_queue(dev, param,
dev->in_iso_pipe, dev->iso_in, 0);
break;
/* FIXME scatterlist cancel (needs helper thread) */
/* Tests for bulk I/O using DMA mapping by core and odd address */
case 17:
if (dev->out_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 17: write odd addr %d bytes %u times core map\n",
param->length, param->iterations);
retval = test_unaligned_bulk(
dev, dev->out_pipe,
param->length, param->iterations,
0, "test17");
break;
case 18:
if (dev->in_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 18: read odd addr %d bytes %u times core map\n",
param->length, param->iterations);
retval = test_unaligned_bulk(
dev, dev->in_pipe,
param->length, param->iterations,
0, "test18");
break;
/* Tests for bulk I/O using premapped coherent buffer and odd address */
case 19:
if (dev->out_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 19: write odd addr %d bytes %u times premapped\n",
param->length, param->iterations);
retval = test_unaligned_bulk(
dev, dev->out_pipe,
param->length, param->iterations,
URB_NO_TRANSFER_DMA_MAP, "test19");
break;
case 20:
if (dev->in_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 20: read odd addr %d bytes %u times premapped\n",
param->length, param->iterations);
retval = test_unaligned_bulk(
dev, dev->in_pipe,
param->length, param->iterations,
URB_NO_TRANSFER_DMA_MAP, "test20");
break;
/* control write tests with unaligned buffer */
case 21:
if (!dev->info->ctrl_out)
break;
dev_info(&intf->dev,
"TEST 21: %d ep0out odd addr, %d..%d vary %d\n",
param->iterations,
realworld ? 1 : 0, param->length,
param->vary);
retval = ctrl_out(dev, param->iterations,
param->length, param->vary, 1);
break;
/* unaligned iso tests */
case 22:
if (dev->out_iso_pipe == 0 || param->sglen == 0)
break;
dev_info(&intf->dev,
"TEST 22: write %d iso odd, %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
retval = test_iso_queue(dev, param,
dev->out_iso_pipe, dev->iso_out, 1);
break;
case 23:
if (dev->in_iso_pipe == 0 || param->sglen == 0)
break;
dev_info(&intf->dev,
"TEST 23: read %d iso odd, %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
retval = test_iso_queue(dev, param,
dev->in_iso_pipe, dev->iso_in, 1);
break;
/* unlink URBs from a bulk-OUT queue */
case 24:
if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
break;
retval = 0;
dev_info(&intf->dev, "TEST 24: unlink from %d queues of "
"%d %d-byte writes\n",
param->iterations, param->sglen, param->length);
for (i = param->iterations; retval == 0 && i > 0; --i) {
retval = unlink_queued(dev, dev->out_pipe,
param->sglen, param->length);
if (retval) {
dev_err(&intf->dev,
"unlink queued writes failed %d, "
"iterations left %d\n", retval, i);
break;
}
}
break;
/* Simple non-queued interrupt I/O tests */
case 25:
if (dev->out_int_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 25: write %d bytes %u times\n",
param->length, param->iterations);
urb = simple_alloc_urb(udev, dev->out_int_pipe, param->length,
dev->int_out->bInterval);
if (!urb) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: interrupt sink (maybe accepts short writes) */
retval = simple_io(dev, urb, param->iterations, 0, 0, "test25");
simple_free_urb(urb);
break;
case 26:
if (dev->in_int_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 26: read %d bytes %u times\n",
param->length, param->iterations);
urb = simple_alloc_urb(udev, dev->in_int_pipe, param->length,
dev->int_in->bInterval);
if (!urb) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: interrupt source (maybe generates short writes) */
retval = simple_io(dev, urb, param->iterations, 0, 0, "test26");
simple_free_urb(urb);
break;
}
do_gettimeofday(&param->duration);
param->duration.tv_sec -= start.tv_sec;
param->duration.tv_usec -= start.tv_usec;
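/* normalize: borrow one second if the microsecond delta went negative */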
if (param->duration.tv_usec < 0) {
param->duration.tv_usec += 1000 * 1000;
param->duration.tv_sec -= 1;
}
mutex_unlock(&dev->lock);
return retval;
}
/*-------------------------------------------------------------------------*/
static unsigned force_interrupt;
module_param(force_interrupt, uint, 0);
MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");
#ifdef GENERIC
static unsigned short vendor;
module_param(vendor, ushort, 0);
MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");
static unsigned short product;
module_param(product, ushort, 0);
MODULE_PARM_DESC(product, "product code (from vendor)");
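/* Example with hypothetical IDs: "modprobe usbtest vendor=0x1234 product=0x5678"
 * lets the catch-all GENERIC entry below bind an otherwise unlisted device,
 * which is enough for the control-only tests.
 */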
#endif
static int
usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *udev;
struct usbtest_dev *dev;
struct usbtest_info *info;
char *rtest, *wtest;
char *irtest, *iwtest;
char *intrtest, *intwtest;
udev = interface_to_usbdev(intf);
#ifdef GENERIC
/* specify devices by module parameters? */
if (id->match_flags == 0) {
/* vendor match required, product match optional */
if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
return -ENODEV;
if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
return -ENODEV;
dev_info(&intf->dev, "matched module params, "
"vend=0x%04x prod=0x%04x\n",
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct));
}
#endif
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
info = (struct usbtest_info *) id->driver_info;
dev->info = info;
mutex_init(&dev->lock);
dev->intf = intf;
/* cacheline-aligned scratch for i/o */
dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
if (dev->buf == NULL) {
kfree(dev);
return -ENOMEM;
}
/* NOTE this doesn't yet test the handful of differences that are
* visible with high speed interrupts: bigger maxpacket (1K) and
* "high bandwidth" modes (up to 3 packets/uframe).
*/
rtest = wtest = "";
irtest = iwtest = "";
intrtest = intwtest = "";
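/* Low speed devices have no bulk endpoints, so use interrupt pipes for
 * them (or whenever the force_interrupt module parameter is set).
 */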
if (force_interrupt || udev->speed == USB_SPEED_LOW) {
if (info->ep_in) {
dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
rtest = " intr-in";
}
if (info->ep_out) {
dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
wtest = " intr-out";
}
} else {
if (override_alt >= 0 || info->autoconf) {
int status;
status = get_endpoints(dev, intf);
if (status < 0) {
WARNING(dev, "couldn't get endpoints, %d\n",
status);
kfree(dev->buf);
kfree(dev);
return status;
}
/* may find bulk or ISO pipes */
} else {
if (info->ep_in)
dev->in_pipe = usb_rcvbulkpipe(udev,
info->ep_in);
if (info->ep_out)
dev->out_pipe = usb_sndbulkpipe(udev,
info->ep_out);
}
if (dev->in_pipe)
rtest = " bulk-in";
if (dev->out_pipe)
wtest = " bulk-out";
if (dev->in_iso_pipe)
irtest = " iso-in";
if (dev->out_iso_pipe)
iwtest = " iso-out";
if (dev->in_int_pipe)
intrtest = " int-in";
if (dev->out_int_pipe)
intwtest = " int-out";
}
usb_set_intfdata(intf, dev);
dev_info(&intf->dev, "%s\n", info->name);
dev_info(&intf->dev, "%s {control%s%s%s%s%s%s%s} tests%s\n",
usb_speed_string(udev->speed),
info->ctrl_out ? " in/out" : "",
rtest, wtest,
irtest, iwtest,
intrtest, intwtest,
info->alt >= 0 ? " (+alt)" : "");
return 0;
}
static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
{
return 0;
}
static int usbtest_resume(struct usb_interface *intf)
{
return 0;
}
static void usbtest_disconnect(struct usb_interface *intf)
{
struct usbtest_dev *dev = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
dev_dbg(&intf->dev, "disconnect\n");
kfree(dev);
}
/* Basic testing only needs a device that can source or sink bulk traffic.
* Any device can test control transfers (default with GENERIC binding).
*
* Several entries work with the default EP0 implementation that's built
* into EZ-USB chips. There's a default vendor ID which can be overridden
* by (very) small config EEPROMS, but otherwise all these devices act
* identically until firmware is loaded: only EP0 works. It turns out
* to be easy to make other endpoints work, without modifying that EP0
* behavior. For now, we expect that kind of firmware.
*/
/* an21xx or fx versions of ez-usb */
static struct usbtest_info ez1_info = {
.name = "EZ-USB device",
.ep_in = 2,
.ep_out = 2,
.alt = 1,
};
/* fx2 version of ez-usb */
static struct usbtest_info ez2_info = {
.name = "FX2 device",
.ep_in = 6,
.ep_out = 2,
.alt = 1,
};
/* ezusb family device with dedicated usb test firmware */
static struct usbtest_info fw_info = {
.name = "usb test device",
.ep_in = 2,
.ep_out = 2,
.alt = 1,
.autoconf = 1, /* iso and ctrl_out need autoconf */
.ctrl_out = 1,
.iso = 1, /* iso_ep's are #8 in/out */
};
/* peripheral running Linux and 'zero.c' test firmware, or
* its user-mode cousin. different versions of this use
* different hardware with the same vendor/product codes.
* host side MUST rely on the endpoint descriptors.
*/
static struct usbtest_info gz_info = {
.name = "Linux gadget zero",
.autoconf = 1,
.ctrl_out = 1,
.iso = 1,
.intr = 1,
.alt = 0,
};
static struct usbtest_info um_info = {
.name = "Linux user mode test driver",
.autoconf = 1,
.alt = -1,
};
static struct usbtest_info um2_info = {
.name = "Linux user mode ISO test driver",
.autoconf = 1,
.iso = 1,
.alt = -1,
};
#ifdef IBOT2
/* this is a nice source of high speed bulk data;
* uses an FX2, with firmware provided in the device
*/
static struct usbtest_info ibot2_info = {
.name = "iBOT2 webcam",
.ep_in = 2,
.alt = -1,
};
#endif
#ifdef GENERIC
/* we can use any device to test control traffic */
static struct usbtest_info generic_info = {
.name = "Generic USB device",
.alt = -1,
};
#endif
static const struct usb_device_id id_table[] = {
/*-------------------------------------------------------------*/
/* EZ-USB devices which download firmware to replace (or in our
* case augment) the default device implementation.
*/
/* generic EZ-USB FX controller */
{ USB_DEVICE(0x0547, 0x2235),
.driver_info = (unsigned long) &ez1_info,
},
/* CY3671 development board with EZ-USB FX */
{ USB_DEVICE(0x0547, 0x0080),
.driver_info = (unsigned long) &ez1_info,
},
/* generic EZ-USB FX2 controller (or development board) */
{ USB_DEVICE(0x04b4, 0x8613),
.driver_info = (unsigned long) &ez2_info,
},
/* re-enumerated usb test device firmware */
{ USB_DEVICE(0xfff0, 0xfff0),
.driver_info = (unsigned long) &fw_info,
},
/* "Gadget Zero" firmware runs under Linux */
{ USB_DEVICE(0x0525, 0xa4a0),
.driver_info = (unsigned long) &gz_info,
},
/* so does a user-mode variant */
{ USB_DEVICE(0x0525, 0xa4a4),
.driver_info = (unsigned long) &um_info,
},
/* ... and a user-mode variant that talks iso */
{ USB_DEVICE(0x0525, 0xa4a3),
.driver_info = (unsigned long) &um2_info,
},
#ifdef KEYSPAN_19Qi
/* Keyspan 19qi uses an21xx (original EZ-USB) */
/* this does not coexist with the real Keyspan 19qi driver! */
{ USB_DEVICE(0x06cd, 0x010b),
.driver_info = (unsigned long) &ez1_info,
},
#endif
/*-------------------------------------------------------------*/
#ifdef IBOT2
/* iBOT2 makes a nice source of high speed bulk-in data */
/* this does not coexist with a real iBOT2 driver! */
{ USB_DEVICE(0x0b62, 0x0059),
.driver_info = (unsigned long) &ibot2_info,
},
#endif
/*-------------------------------------------------------------*/
#ifdef GENERIC
/* module params can specify devices to use for control tests */
{ .driver_info = (unsigned long) &generic_info, },
#endif
/*-------------------------------------------------------------*/
{ }
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_driver usbtest_driver = {
.name = "usbtest",
.id_table = id_table,
.probe = usbtest_probe,
.unlocked_ioctl = usbtest_ioctl,
.disconnect = usbtest_disconnect,
.suspend = usbtest_suspend,
.resume = usbtest_resume,
};
/*-------------------------------------------------------------------------*/
static int __init usbtest_init(void)
{
#ifdef GENERIC
if (vendor)
pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
#endif
return usb_register(&usbtest_driver);
}
module_init(usbtest_init);
static void __exit usbtest_exit(void)
{
usb_deregister(&usbtest_driver);
}
module_exit(usbtest_exit);
MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
MODULE_LICENSE("GPL");