Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6: (23 commits)
  firewire: ohci: extend initialization log message
  firewire: ohci: fix IR/IT context mask mixup
  firewire: ohci: add module parameter to activate quirk fixes
  firewire: ohci: use an ID table for quirks detection
  firewire: ohci: reorder struct fw_ohci for better cache efficiency
  firewire: ohci: remove unused dualbuffer IR code
  firewire: core: combine a bit of repeated code
  firewire: core: change type of a data buffer
  firewire: cdev: increment ABI version number
  firewire: cdev: add more flexible cycle timer ioctl
  firewire: core: rename an internal function
  firewire: core: fix an information leak
  firewire: core: increase stack size of config ROM reader
  firewire: core: don't fail device creation in case of too large config ROM blocks
  firewire: core: fix "giving up on config rom" with Panasonic AG-DV2500
  firewire: remove incomplete Bus_Time CSR support
  firewire: get_cycle_timer optimization and cleanup
  firewire: ohci: enable cycle timer fix on ALi and NEC controllers
  firewire: ohci: work around cycle timer bugs on VIA controllers
  firewire: make PCI device id constant
  ...
Linus Torvalds 2010-03-03 08:08:44 -08:00
commit c1dcb4bb1e
10 changed files with 492 additions and 551 deletions

View file

@ -25,6 +25,7 @@
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
@ -32,7 +33,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@ -368,39 +368,56 @@ void fw_device_cdev_remove(struct fw_device *device)
for_each_client(device, wake_up_client);
}
static int ioctl_get_info(struct client *client, void *buffer)
union ioctl_arg {
struct fw_cdev_get_info get_info;
struct fw_cdev_send_request send_request;
struct fw_cdev_allocate allocate;
struct fw_cdev_deallocate deallocate;
struct fw_cdev_send_response send_response;
struct fw_cdev_initiate_bus_reset initiate_bus_reset;
struct fw_cdev_add_descriptor add_descriptor;
struct fw_cdev_remove_descriptor remove_descriptor;
struct fw_cdev_create_iso_context create_iso_context;
struct fw_cdev_queue_iso queue_iso;
struct fw_cdev_start_iso start_iso;
struct fw_cdev_stop_iso stop_iso;
struct fw_cdev_get_cycle_timer get_cycle_timer;
struct fw_cdev_allocate_iso_resource allocate_iso_resource;
struct fw_cdev_send_stream_packet send_stream_packet;
struct fw_cdev_get_cycle_timer2 get_cycle_timer2;
};
static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_get_info *get_info = buffer;
struct fw_cdev_get_info *a = &arg->get_info;
struct fw_cdev_event_bus_reset bus_reset;
unsigned long ret = 0;
client->version = get_info->version;
get_info->version = FW_CDEV_VERSION;
get_info->card = client->device->card->index;
client->version = a->version;
a->version = FW_CDEV_VERSION;
a->card = client->device->card->index;
down_read(&fw_device_rwsem);
if (get_info->rom != 0) {
void __user *uptr = u64_to_uptr(get_info->rom);
size_t want = get_info->rom_length;
if (a->rom != 0) {
size_t want = a->rom_length;
size_t have = client->device->config_rom_length * 4;
ret = copy_to_user(uptr, client->device->config_rom,
min(want, have));
ret = copy_to_user(u64_to_uptr(a->rom),
client->device->config_rom, min(want, have));
}
get_info->rom_length = client->device->config_rom_length * 4;
a->rom_length = client->device->config_rom_length * 4;
up_read(&fw_device_rwsem);
if (ret != 0)
return -EFAULT;
client->bus_reset_closure = get_info->bus_reset_closure;
if (get_info->bus_reset != 0) {
void __user *uptr = u64_to_uptr(get_info->bus_reset);
client->bus_reset_closure = a->bus_reset_closure;
if (a->bus_reset != 0) {
fill_bus_reset_event(&bus_reset, client);
if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
if (copy_to_user(u64_to_uptr(a->bus_reset),
&bus_reset, sizeof(bus_reset)))
return -EFAULT;
}
@ -571,11 +588,9 @@ static int init_request(struct client *client,
return ret;
}
static int ioctl_send_request(struct client *client, void *buffer)
static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_send_request *request = buffer;
switch (request->tcode) {
switch (arg->send_request.tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
case TCODE_WRITE_BLOCK_REQUEST:
case TCODE_READ_QUADLET_REQUEST:
@ -592,7 +607,7 @@ static int ioctl_send_request(struct client *client, void *buffer)
return -EINVAL;
}
return init_request(client, request, client->device->node_id,
return init_request(client, &arg->send_request, client->device->node_id,
client->device->max_speed);
}
@ -683,9 +698,9 @@ static void release_address_handler(struct client *client,
kfree(r);
}
static int ioctl_allocate(struct client *client, void *buffer)
static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_allocate *request = buffer;
struct fw_cdev_allocate *a = &arg->allocate;
struct address_handler_resource *r;
struct fw_address_region region;
int ret;
@ -694,13 +709,13 @@ static int ioctl_allocate(struct client *client, void *buffer)
if (r == NULL)
return -ENOMEM;
region.start = request->offset;
region.end = request->offset + request->length;
r->handler.length = request->length;
region.start = a->offset;
region.end = a->offset + a->length;
r->handler.length = a->length;
r->handler.address_callback = handle_request;
r->handler.callback_data = r;
r->closure = request->closure;
r->client = client;
r->handler.callback_data = r;
r->closure = a->closure;
r->client = client;
ret = fw_core_add_address_handler(&r->handler, &region);
if (ret < 0) {
@ -714,27 +729,25 @@ static int ioctl_allocate(struct client *client, void *buffer)
release_address_handler(client, &r->resource);
return ret;
}
request->handle = r->resource.handle;
a->handle = r->resource.handle;
return 0;
}
static int ioctl_deallocate(struct client *client, void *buffer)
static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_deallocate *request = buffer;
return release_client_resource(client, request->handle,
return release_client_resource(client, arg->deallocate.handle,
release_address_handler, NULL);
}
static int ioctl_send_response(struct client *client, void *buffer)
static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_send_response *request = buffer;
struct fw_cdev_send_response *a = &arg->send_response;
struct client_resource *resource;
struct inbound_transaction_resource *r;
int ret = 0;
if (release_client_resource(client, request->handle,
if (release_client_resource(client, a->handle,
release_request, &resource) < 0)
return -EINVAL;
@ -743,28 +756,24 @@ static int ioctl_send_response(struct client *client, void *buffer)
if (is_fcp_request(r->request))
goto out;
if (request->length < r->length)
r->length = request->length;
if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) {
if (a->length < r->length)
r->length = a->length;
if (copy_from_user(r->data, u64_to_uptr(a->data), r->length)) {
ret = -EFAULT;
kfree(r->request);
goto out;
}
fw_send_response(client->device->card, r->request, request->rcode);
fw_send_response(client->device->card, r->request, a->rcode);
out:
kfree(r);
return ret;
}
static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_initiate_bus_reset *request = buffer;
int short_reset;
short_reset = (request->type == FW_CDEV_SHORT_RESET);
return fw_core_initiate_bus_reset(client->device->card, short_reset);
return fw_core_initiate_bus_reset(client->device->card,
arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
}
static void release_descriptor(struct client *client,
@ -777,9 +786,9 @@ static void release_descriptor(struct client *client,
kfree(r);
}
static int ioctl_add_descriptor(struct client *client, void *buffer)
static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_add_descriptor *request = buffer;
struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
struct descriptor_resource *r;
int ret;
@ -787,22 +796,21 @@ static int ioctl_add_descriptor(struct client *client, void *buffer)
if (!client->device->is_local)
return -ENOSYS;
if (request->length > 256)
if (a->length > 256)
return -EINVAL;
r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
if (r == NULL)
return -ENOMEM;
if (copy_from_user(r->data,
u64_to_uptr(request->data), request->length * 4)) {
if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
ret = -EFAULT;
goto failed;
}
r->descriptor.length = request->length;
r->descriptor.immediate = request->immediate;
r->descriptor.key = request->key;
r->descriptor.length = a->length;
r->descriptor.immediate = a->immediate;
r->descriptor.key = a->key;
r->descriptor.data = r->data;
ret = fw_core_add_descriptor(&r->descriptor);
@ -815,7 +823,7 @@ static int ioctl_add_descriptor(struct client *client, void *buffer)
fw_core_remove_descriptor(&r->descriptor);
goto failed;
}
request->handle = r->resource.handle;
a->handle = r->resource.handle;
return 0;
failed:
@ -824,11 +832,9 @@ static int ioctl_add_descriptor(struct client *client, void *buffer)
return ret;
}
static int ioctl_remove_descriptor(struct client *client, void *buffer)
static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_remove_descriptor *request = buffer;
return release_client_resource(client, request->handle,
return release_client_resource(client, arg->remove_descriptor.handle,
release_descriptor, NULL);
}
@ -851,49 +857,44 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle,
sizeof(e->interrupt) + header_length, NULL, 0);
}
static int ioctl_create_iso_context(struct client *client, void *buffer)
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_create_iso_context *request = buffer;
struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
struct fw_iso_context *context;
/* We only support one context at this time. */
if (client->iso_context != NULL)
return -EBUSY;
if (request->channel > 63)
if (a->channel > 63)
return -EINVAL;
switch (request->type) {
switch (a->type) {
case FW_ISO_CONTEXT_RECEIVE:
if (request->header_size < 4 || (request->header_size & 3))
if (a->header_size < 4 || (a->header_size & 3))
return -EINVAL;
break;
case FW_ISO_CONTEXT_TRANSMIT:
if (request->speed > SCODE_3200)
if (a->speed > SCODE_3200)
return -EINVAL;
break;
default:
return -EINVAL;
}
context = fw_iso_context_create(client->device->card,
request->type,
request->channel,
request->speed,
request->header_size,
iso_callback, client);
context = fw_iso_context_create(client->device->card, a->type,
a->channel, a->speed, a->header_size,
iso_callback, client);
if (IS_ERR(context))
return PTR_ERR(context);
client->iso_closure = request->closure;
client->iso_closure = a->closure;
client->iso_context = context;
/* We only support one context at this time. */
request->handle = 0;
a->handle = 0;
return 0;
}
@ -906,9 +907,9 @@ static int ioctl_create_iso_context(struct client *client, void *buffer)
#define GET_SY(v) (((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff)
static int ioctl_queue_iso(struct client *client, void *buffer)
static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_queue_iso *request = buffer;
struct fw_cdev_queue_iso *a = &arg->queue_iso;
struct fw_cdev_iso_packet __user *p, *end, *next;
struct fw_iso_context *ctx = client->iso_context;
unsigned long payload, buffer_end, header_length;
@ -919,7 +920,7 @@ static int ioctl_queue_iso(struct client *client, void *buffer)
u8 header[256];
} u;
if (ctx == NULL || request->handle != 0)
if (ctx == NULL || a->handle != 0)
return -EINVAL;
/*
@ -929,23 +930,23 @@ static int ioctl_queue_iso(struct client *client, void *buffer)
* set them both to 0, which will still let packets with
* payload_length == 0 through. In other words, if no packets
* use the indirect payload, the iso buffer need not be mapped
* and the request->data pointer is ignored.
* and the a->data pointer is ignored.
*/
payload = (unsigned long)request->data - client->vm_start;
payload = (unsigned long)a->data - client->vm_start;
buffer_end = client->buffer.page_count << PAGE_SHIFT;
if (request->data == 0 || client->buffer.pages == NULL ||
if (a->data == 0 || client->buffer.pages == NULL ||
payload >= buffer_end) {
payload = 0;
buffer_end = 0;
}
p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);
p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
if (!access_ok(VERIFY_READ, p, request->size))
if (!access_ok(VERIFY_READ, p, a->size))
return -EFAULT;
end = (void __user *)p + request->size;
end = (void __user *)p + a->size;
count = 0;
while (p < end) {
if (get_user(control, &p->control))
@ -995,61 +996,78 @@ static int ioctl_queue_iso(struct client *client, void *buffer)
count++;
}
request->size -= uptr_to_u64(p) - request->packets;
request->packets = uptr_to_u64(p);
request->data = client->vm_start + payload;
a->size -= uptr_to_u64(p) - a->packets;
a->packets = uptr_to_u64(p);
a->data = client->vm_start + payload;
return count;
}
static int ioctl_start_iso(struct client *client, void *buffer)
static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_start_iso *request = buffer;
struct fw_cdev_start_iso *a = &arg->start_iso;
if (client->iso_context == NULL || request->handle != 0)
if (client->iso_context == NULL || a->handle != 0)
return -EINVAL;
if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
if (request->tags == 0 || request->tags > 15)
return -EINVAL;
if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
(a->tags == 0 || a->tags > 15 || a->sync > 15))
return -EINVAL;
if (request->sync > 15)
return -EINVAL;
}
return fw_iso_context_start(client->iso_context, request->cycle,
request->sync, request->tags);
return fw_iso_context_start(client->iso_context,
a->cycle, a->sync, a->tags);
}
static int ioctl_stop_iso(struct client *client, void *buffer)
static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_stop_iso *request = buffer;
struct fw_cdev_stop_iso *a = &arg->stop_iso;
if (client->iso_context == NULL || request->handle != 0)
if (client->iso_context == NULL || a->handle != 0)
return -EINVAL;
return fw_iso_context_stop(client->iso_context);
}
static int ioctl_get_cycle_timer(struct client *client, void *buffer)
static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_get_cycle_timer *request = buffer;
struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
struct fw_card *card = client->device->card;
unsigned long long bus_time;
struct timeval tv;
unsigned long flags;
struct timespec ts = {0, 0};
u32 cycle_time;
int ret = 0;
preempt_disable();
local_irq_save(flags);
local_irq_disable();
bus_time = card->driver->get_bus_time(card);
do_gettimeofday(&tv);
cycle_time = card->driver->get_cycle_time(card);
local_irq_restore(flags);
preempt_enable();
switch (a->clk_id) {
case CLOCK_REALTIME: getnstimeofday(&ts); break;
case CLOCK_MONOTONIC: do_posix_clock_monotonic_gettime(&ts); break;
case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts); break;
default:
ret = -EINVAL;
}
local_irq_enable();
a->tv_sec = ts.tv_sec;
a->tv_nsec = ts.tv_nsec;
a->cycle_timer = cycle_time;
return ret;
}
static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
struct fw_cdev_get_cycle_timer2 ct2;
ct2.clk_id = CLOCK_REALTIME;
ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);
a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
a->cycle_timer = ct2.cycle_timer;
request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
request->cycle_timer = bus_time & 0xffffffff;
return 0;
}
@ -1220,33 +1238,32 @@ static int init_iso_resource(struct client *client,
return ret;
}
static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
static int ioctl_allocate_iso_resource(struct client *client,
union ioctl_arg *arg)
{
struct fw_cdev_allocate_iso_resource *request = buffer;
return init_iso_resource(client, request, ISO_RES_ALLOC);
return init_iso_resource(client,
&arg->allocate_iso_resource, ISO_RES_ALLOC);
}
static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
static int ioctl_deallocate_iso_resource(struct client *client,
union ioctl_arg *arg)
{
struct fw_cdev_deallocate *request = buffer;
return release_client_resource(client, request->handle,
release_iso_resource, NULL);
return release_client_resource(client,
arg->deallocate.handle, release_iso_resource, NULL);
}
static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
static int ioctl_allocate_iso_resource_once(struct client *client,
union ioctl_arg *arg)
{
struct fw_cdev_allocate_iso_resource *request = buffer;
return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
return init_iso_resource(client,
&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}
static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
static int ioctl_deallocate_iso_resource_once(struct client *client,
union ioctl_arg *arg)
{
struct fw_cdev_allocate_iso_resource *request = buffer;
return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
return init_iso_resource(client,
&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}
/*
@ -1254,16 +1271,17 @@ static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffe
* limited by the device's link speed, the local node's link speed,
* and all PHY port speeds between the two links.
*/
static int ioctl_get_speed(struct client *client, void *buffer)
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
return client->device->max_speed;
}
static int ioctl_send_broadcast_request(struct client *client, void *buffer)
static int ioctl_send_broadcast_request(struct client *client,
union ioctl_arg *arg)
{
struct fw_cdev_send_request *request = buffer;
struct fw_cdev_send_request *a = &arg->send_request;
switch (request->tcode) {
switch (a->tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
case TCODE_WRITE_BLOCK_REQUEST:
break;
@ -1272,36 +1290,36 @@ static int ioctl_send_broadcast_request(struct client *client, void *buffer)
}
/* Security policy: Only allow accesses to Units Space. */
if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
return -EACCES;
return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}
static int ioctl_send_stream_packet(struct client *client, void *buffer)
static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_send_stream_packet *p = buffer;
struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
struct fw_cdev_send_request request;
int dest;
if (p->speed > client->device->card->link_speed ||
p->length > 1024 << p->speed)
if (a->speed > client->device->card->link_speed ||
a->length > 1024 << a->speed)
return -EIO;
if (p->tag > 3 || p->channel > 63 || p->sy > 15)
if (a->tag > 3 || a->channel > 63 || a->sy > 15)
return -EINVAL;
dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy);
dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
request.tcode = TCODE_STREAM_DATA;
request.length = p->length;
request.closure = p->closure;
request.data = p->data;
request.generation = p->generation;
request.length = a->length;
request.closure = a->closure;
request.data = a->data;
request.generation = a->generation;
return init_request(client, &request, dest, p->speed);
return init_request(client, &request, dest, a->speed);
}
static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
ioctl_get_info,
ioctl_send_request,
ioctl_allocate,
@ -1322,47 +1340,35 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
ioctl_get_speed,
ioctl_send_broadcast_request,
ioctl_send_stream_packet,
ioctl_get_cycle_timer2,
};
static int dispatch_ioctl(struct client *client,
unsigned int cmd, void __user *arg)
{
char buffer[sizeof(union {
struct fw_cdev_get_info _00;
struct fw_cdev_send_request _01;
struct fw_cdev_allocate _02;
struct fw_cdev_deallocate _03;
struct fw_cdev_send_response _04;
struct fw_cdev_initiate_bus_reset _05;
struct fw_cdev_add_descriptor _06;
struct fw_cdev_remove_descriptor _07;
struct fw_cdev_create_iso_context _08;
struct fw_cdev_queue_iso _09;
struct fw_cdev_start_iso _0a;
struct fw_cdev_stop_iso _0b;
struct fw_cdev_get_cycle_timer _0c;
struct fw_cdev_allocate_iso_resource _0d;
struct fw_cdev_send_stream_packet _13;
})];
union ioctl_arg buffer;
int ret;
if (fw_device_is_shutdown(client->device))
return -ENODEV;
if (_IOC_TYPE(cmd) != '#' ||
_IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
return -EINVAL;
if (_IOC_DIR(cmd) & _IOC_WRITE) {
if (_IOC_SIZE(cmd) > sizeof(buffer) ||
copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
return -EFAULT;
}
ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
if (ret < 0)
return ret;
if (_IOC_DIR(cmd) & _IOC_READ) {
if (_IOC_SIZE(cmd) > sizeof(buffer) ||
copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
return -EFAULT;
}
@ -1372,24 +1378,14 @@ static int dispatch_ioctl(struct client *client,
static long fw_device_op_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct client *client = file->private_data;
if (fw_device_is_shutdown(client->device))
return -ENODEV;
return dispatch_ioctl(client, cmd, (void __user *) arg);
return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}
#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct client *client = file->private_data;
if (fw_device_is_shutdown(client->device))
return -ENODEV;
return dispatch_ioctl(client, cmd, compat_ptr(arg));
return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif

View file

@ -18,6 +18,7 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/bug.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
@ -43,7 +44,7 @@
#include "core.h"
void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p)
{
ci->p = p + 1;
ci->end = ci->p + (p[0] >> 16);
@ -59,9 +60,76 @@ int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
}
EXPORT_SYMBOL(fw_csr_iterator_next);
static const u32 *search_leaf(const u32 *directory, int search_key)
{
struct fw_csr_iterator ci;
int last_key = 0, key, value;
fw_csr_iterator_init(&ci, directory);
while (fw_csr_iterator_next(&ci, &key, &value)) {
if (last_key == search_key &&
key == (CSR_DESCRIPTOR | CSR_LEAF))
return ci.p - 1 + value;
last_key = key;
}
return NULL;
}
static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
{
unsigned int quadlets, i;
char c;
if (!size || !buf)
return -EINVAL;
quadlets = min(block[0] >> 16, 256U);
if (quadlets < 2)
return -ENODATA;
if (block[1] != 0 || block[2] != 0)
/* unknown language/character set */
return -ENODATA;
block += 3;
quadlets -= 2;
for (i = 0; i < quadlets * 4 && i < size - 1; i++) {
c = block[i / 4] >> (24 - 8 * (i % 4));
if (c == '\0')
break;
buf[i] = c;
}
buf[i] = '\0';
return i;
}
/**
* fw_csr_string - reads a string from the configuration ROM
* @directory: e.g. root directory or unit directory
* @key: the key of the preceding directory entry
* @buf: where to put the string
* @size: size of @buf, in bytes
*
* The string is taken from a minimal ASCII text descriptor leaf after
* the immediate entry with @key. The string is zero-terminated.
* Returns strlen(buf) or a negative error code.
*/
int fw_csr_string(const u32 *directory, int key, char *buf, size_t size)
{
const u32 *leaf = search_leaf(directory, key);
if (!leaf)
return -ENOENT;
return textual_leaf_to_string(leaf, buf, size);
}
EXPORT_SYMBOL(fw_csr_string);
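The new fw_csr_string() helper centralizes the textual-descriptor parsing that the sysfs text-leaf attribute and the firedtv driver previously open-coded. A minimal caller might look like the sketch below; the buffer size and printk usage are illustrative, not part of the patch.

/* Illustrative sketch, not part of the patch. */
static void example_show_model(struct fw_unit *unit)
{
	char name[32];
	int ret;

	/* Look up the model name leaf referenced by the CSR_MODEL entry. */
	ret = fw_csr_string(unit->directory, CSR_MODEL, name, sizeof(name));
	if (ret >= 0)	/* ret == strlen(name) */
		printk(KERN_INFO "model name: %s\n", name);
	else
		printk(KERN_INFO "no model name leaf: %d\n", ret);
}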
static bool is_fw_unit(struct device *dev);
static int match_unit_directory(u32 *directory, u32 match_flags,
static int match_unit_directory(const u32 *directory, u32 match_flags,
const struct ieee1394_device_id *id)
{
struct fw_csr_iterator ci;
@ -195,7 +263,7 @@ static ssize_t show_immediate(struct device *dev,
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
struct fw_csr_iterator ci;
u32 *dir;
const u32 *dir;
int key, value, ret = -ENOENT;
down_read(&fw_device_rwsem);
@ -226,10 +294,10 @@ static ssize_t show_text_leaf(struct device *dev,
{
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
struct fw_csr_iterator ci;
u32 *dir, *block = NULL, *p, *end;
int length, key, value, last_key = 0, ret = -ENOENT;
char *b;
const u32 *dir;
size_t bufsize;
char dummy_buf[2];
int ret;
down_read(&fw_device_rwsem);
@ -238,40 +306,23 @@ static ssize_t show_text_leaf(struct device *dev,
else
dir = fw_device(dev)->config_rom + 5;
fw_csr_iterator_init(&ci, dir);
while (fw_csr_iterator_next(&ci, &key, &value)) {
if (attr->key == last_key &&
key == (CSR_DESCRIPTOR | CSR_LEAF))
block = ci.p - 1 + value;
last_key = key;
if (buf) {
bufsize = PAGE_SIZE - 1;
} else {
buf = dummy_buf;
bufsize = 1;
}
if (block == NULL)
goto out;
ret = fw_csr_string(dir, attr->key, buf, bufsize);
length = min(block[0] >> 16, 256U);
if (length < 3)
goto out;
if (block[1] != 0 || block[2] != 0)
/* Unknown encoding. */
goto out;
if (buf == NULL) {
ret = length * 4;
goto out;
if (ret >= 0) {
/* Strip trailing whitespace and add newline. */
while (ret > 0 && isspace(buf[ret - 1]))
ret--;
strcpy(buf + ret, "\n");
ret++;
}
b = buf;
end = &block[length + 1];
for (p = &block[3]; p < end; p++, b += 4)
* (u32 *) b = (__force u32) __cpu_to_be32(*p);
/* Strip trailing whitespace and add newline. */
while (b--, (isspace(*b) || *b == '\0') && b > buf);
strcpy(b + 1, "\n");
ret = b + 2 - buf;
out:
up_read(&fw_device_rwsem);
return ret;
@ -371,7 +422,7 @@ static ssize_t guid_show(struct device *dev,
return ret;
}
static int units_sprintf(char *buf, u32 *directory)
static int units_sprintf(char *buf, const u32 *directory)
{
struct fw_csr_iterator ci;
int key, value;
@ -441,28 +492,29 @@ static int read_rom(struct fw_device *device,
return rcode;
}
#define READ_BIB_ROM_SIZE 256
#define READ_BIB_STACK_SIZE 16
#define MAX_CONFIG_ROM_SIZE 256
/*
* Read the bus info block, perform a speed probe, and read all of the rest of
* the config ROM. We do all this with a cached bus generation. If the bus
* generation changes under us, read_bus_info_block will fail and get retried.
* generation changes under us, read_config_rom will fail and get retried.
* It's better to start all over in this case because the node from which we
* are reading the ROM may have changed the ROM during the reset.
*/
static int read_bus_info_block(struct fw_device *device, int generation)
static int read_config_rom(struct fw_device *device, int generation)
{
u32 *rom, *stack, *old_rom, *new_rom;
const u32 *old_rom, *new_rom;
u32 *rom, *stack;
u32 sp, key;
int i, end, length, ret = -1;
rom = kmalloc(sizeof(*rom) * READ_BIB_ROM_SIZE +
sizeof(*stack) * READ_BIB_STACK_SIZE, GFP_KERNEL);
rom = kmalloc(sizeof(*rom) * MAX_CONFIG_ROM_SIZE +
sizeof(*stack) * MAX_CONFIG_ROM_SIZE, GFP_KERNEL);
if (rom == NULL)
return -ENOMEM;
stack = &rom[READ_BIB_ROM_SIZE];
stack = &rom[MAX_CONFIG_ROM_SIZE];
memset(rom, 0, sizeof(*rom) * MAX_CONFIG_ROM_SIZE);
device->max_speed = SCODE_100;
@ -529,40 +581,54 @@ static int read_bus_info_block(struct fw_device *device, int generation)
*/
key = stack[--sp];
i = key & 0xffffff;
if (i >= READ_BIB_ROM_SIZE)
/*
* The reference points outside the standard
* config rom area, something's fishy.
*/
if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE))
goto out;
/* Read header quadlet for the block to get the length. */
if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
goto out;
end = i + (rom[i] >> 16) + 1;
i++;
if (end > READ_BIB_ROM_SIZE)
if (end > MAX_CONFIG_ROM_SIZE) {
/*
* This block extends outside standard config
* area (and the array we're reading it
* into). That's broken, so ignore this
* device.
* This block extends outside the config ROM which is
* a firmware bug. Ignore this whole block, i.e.
* simply set a fake block length of 0.
*/
goto out;
fw_error("skipped invalid ROM block %x at %llx\n",
rom[i],
i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
rom[i] = 0;
end = i;
}
i++;
/*
* Now read in the block. If this is a directory
* block, check the entries as we read them to see if
* it references another block, and push it in that case.
*/
while (i < end) {
for (; i < end; i++) {
if (read_rom(device, generation, i, &rom[i]) !=
RCODE_COMPLETE)
goto out;
if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
sp < READ_BIB_STACK_SIZE)
stack[sp++] = i + rom[i];
i++;
if ((key >> 30) != 3 || (rom[i] >> 30) < 2)
continue;
/*
* Offset points outside the ROM. May be a firmware
* bug or an Extended ROM entry (IEEE 1212-2001 clause
* 7.7.18). Simply overwrite this pointer here by a
* fake immediate entry so that later iterators over
* the ROM don't have to check offsets all the time.
*/
if (i + (rom[i] & 0xffffff) >= MAX_CONFIG_ROM_SIZE) {
fw_error("skipped unsupported ROM entry %x at %llx\n",
rom[i],
i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
rom[i] = 0;
continue;
}
stack[sp++] = i + rom[i];
}
if (length < i)
length = i;
@ -905,7 +971,7 @@ static void fw_device_init(struct work_struct *work)
* device.
*/
if (read_bus_info_block(device, device->generation) < 0) {
if (read_config_rom(device, device->generation) < 0) {
if (device->config_rom_retries < MAX_RETRIES &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
device->config_rom_retries++;
@ -1022,7 +1088,7 @@ enum {
};
/* Reread and compare bus info block and header of root directory */
static int reread_bus_info_block(struct fw_device *device, int generation)
static int reread_config_rom(struct fw_device *device, int generation)
{
u32 q;
int i;
@ -1048,7 +1114,7 @@ static void fw_device_refresh(struct work_struct *work)
struct fw_card *card = device->card;
int node_id = device->node_id;
switch (reread_bus_info_block(device, device->generation)) {
switch (reread_config_rom(device, device->generation)) {
case REREAD_BIB_ERROR:
if (device->config_rom_retries < MAX_RETRIES / 2 &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
@ -1082,7 +1148,7 @@ static void fw_device_refresh(struct work_struct *work)
*/
device_for_each_child(&device->device, NULL, shutdown_unit);
if (read_bus_info_block(device, device->generation) < 0) {
if (read_config_rom(device, device->generation) < 0) {
if (device->config_rom_retries < MAX_RETRIES &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
device->config_rom_retries++;

View file

@ -921,23 +921,15 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
void *payload, size_t length, void *callback_data)
{
int reg = offset & ~CSR_REGISTER_BASE;
unsigned long long bus_time;
__be32 *data = payload;
int rcode = RCODE_COMPLETE;
switch (reg) {
case CSR_CYCLE_TIME:
case CSR_BUS_TIME:
if (!TCODE_IS_READ_REQUEST(tcode) || length != 4) {
rcode = RCODE_TYPE_ERROR;
break;
}
bus_time = card->driver->get_bus_time(card);
if (reg == CSR_CYCLE_TIME)
*data = cpu_to_be32(bus_time);
if (TCODE_IS_READ_REQUEST(tcode) && length == 4)
*data = cpu_to_be32(card->driver->get_cycle_time(card));
else
*data = cpu_to_be32(bus_time >> 25);
rcode = RCODE_TYPE_ERROR;
break;
case CSR_BROADCAST_CHANNEL:
@ -968,6 +960,9 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
case CSR_BUSY_TIMEOUT:
/* FIXME: Implement this. */
case CSR_BUS_TIME:
/* Useless without initialization by the bus manager. */
default:
rcode = RCODE_ADDRESS_ERROR;
break;

View file

@ -70,7 +70,7 @@ struct fw_card_driver {
int (*enable_phys_dma)(struct fw_card *card,
int node_id, int generation);
u64 (*get_bus_time)(struct fw_card *card);
u32 (*get_cycle_time)(struct fw_card *card);
struct fw_iso_context *
(*allocate_iso_context)(struct fw_card *card,

View file

@ -38,7 +38,6 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <asm/atomic.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/system.h>
@ -73,20 +72,6 @@ struct descriptor {
__le16 transfer_status;
} __attribute__((aligned(16)));
struct db_descriptor {
__le16 first_size;
__le16 control;
__le16 second_req_count;
__le16 first_req_count;
__le32 branch_address;
__le16 second_res_count;
__le16 first_res_count;
__le32 reserved0;
__le32 first_buffer;
__le32 second_buffer;
__le32 reserved1;
} __attribute__((aligned(16)));
#define CONTROL_SET(regs) (regs)
#define CONTROL_CLEAR(regs) ((regs) + 4)
#define COMMAND_PTR(regs) ((regs) + 12)
@ -181,31 +166,16 @@ struct fw_ohci {
struct fw_card card;
__iomem char *registers;
dma_addr_t self_id_bus;
__le32 *self_id_cpu;
struct tasklet_struct bus_reset_tasklet;
int node_id;
int generation;
int request_generation; /* for timestamping incoming requests */
atomic_t bus_seconds;
bool use_dualbuffer;
bool old_uninorth;
bool bus_reset_packet_quirk;
unsigned quirks;
/*
* Spinlock for accessing fw_ohci data. Never call out of
* this driver with this lock held.
*/
spinlock_t lock;
u32 self_id_buffer[512];
/* Config rom buffers */
__be32 *config_rom;
dma_addr_t config_rom_bus;
__be32 *next_config_rom;
dma_addr_t next_config_rom_bus;
__be32 next_header;
struct ar_context ar_request_ctx;
struct ar_context ar_response_ctx;
@ -217,6 +187,18 @@ struct fw_ohci {
u64 ir_context_channels;
u32 ir_context_mask;
struct iso_context *ir_context_list;
__be32 *config_rom;
dma_addr_t config_rom_bus;
__be32 *next_config_rom;
dma_addr_t next_config_rom_bus;
__be32 next_header;
__le32 *self_id_cpu;
dma_addr_t self_id_bus;
struct tasklet_struct bus_reset_tasklet;
u32 self_id_buffer[512];
};
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
@ -249,6 +231,30 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
static char ohci_driver_name[] = KBUILD_MODNAME;
#define QUIRK_CYCLE_TIMER 1
#define QUIRK_RESET_PACKET 2
#define QUIRK_BE_HEADERS 4
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
unsigned short vendor, device, flags;
} ohci_quirks[] = {
{PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
{PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
{PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
{PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
};
/* This overrides anything that was found in ohci_quirks[]. */
static int param_quirks;
module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
")");
#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
#define OHCI_PARAM_DEBUG_AT_AR 1
@ -275,7 +281,7 @@ static void log_irqs(u32 evt)
!(evt & OHCI1394_busReset))
return;
fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
evt & OHCI1394_selfIDComplete ? " selfID" : "",
evt & OHCI1394_RQPkt ? " AR_req" : "",
evt & OHCI1394_RSPkt ? " AR_resp" : "",
@ -285,7 +291,6 @@ static void log_irqs(u32 evt)
evt & OHCI1394_isochTx ? " IT" : "",
evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
evt & OHCI1394_busReset ? " busReset" : "",
@ -293,8 +298,7 @@ static void log_irqs(u32 evt)
OHCI1394_RSPkt | OHCI1394_reqTxComplete |
OHCI1394_respTxComplete | OHCI1394_isochRx |
OHCI1394_isochTx | OHCI1394_postedWriteErr |
OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
OHCI1394_cycleInconsistent |
OHCI1394_cycleTooLong | OHCI1394_cycleInconsistent |
OHCI1394_regAccessFail | OHCI1394_busReset)
? " ?" : "");
}
@ -524,7 +528,7 @@ static void ar_context_release(struct ar_context *ctx)
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
(ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
(ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif
@ -605,7 +609,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
* at a slightly incorrect time (in bus_reset_tasklet).
*/
if (evt == OHCI1394_evt_bus_reset) {
if (!ohci->bus_reset_packet_quirk)
if (!(ohci->quirks & QUIRK_RESET_PACKET))
ohci->request_generation = (p.header[2] >> 16) & 0xff;
} else if (ctx == &ohci->ar_request_ctx) {
fw_core_handle_request(&ohci->card, &p);
@ -1329,7 +1333,7 @@ static void bus_reset_tasklet(unsigned long data)
context_stop(&ohci->at_response_ctx);
reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
if (ohci->bus_reset_packet_quirk)
if (ohci->quirks & QUIRK_RESET_PACKET)
ohci->request_generation = generation;
/*
@ -1384,7 +1388,7 @@ static void bus_reset_tasklet(unsigned long data)
static irqreturn_t irq_handler(int irq, void *data)
{
struct fw_ohci *ohci = data;
u32 event, iso_event, cycle_time;
u32 event, iso_event;
int i;
event = reg_read(ohci, OHCI1394_IntEventClear);
@ -1454,12 +1458,6 @@ static irqreturn_t irq_handler(int irq, void *data)
fw_notify("isochronous cycle inconsistent\n");
}
if (event & OHCI1394_cycle64Seconds) {
cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
if ((cycle_time & 0x80000000) == 0)
atomic_inc(&ohci->bus_seconds);
}
return IRQ_HANDLED;
}
@ -1553,8 +1551,7 @@ static int ohci_enable(struct fw_card *card,
OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
OHCI1394_isochRx | OHCI1394_isochTx |
OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
OHCI1394_cycleInconsistent |
OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
OHCI1394_cycleInconsistent | OHCI1394_regAccessFail |
OHCI1394_masterIntEnable);
if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
@ -1794,16 +1791,61 @@ static int ohci_enable_phys_dma(struct fw_card *card,
#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}
static u64 ohci_get_bus_time(struct fw_card *card)
static u32 cycle_timer_ticks(u32 cycle_timer)
{
u32 ticks;
ticks = cycle_timer & 0xfff;
ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
ticks += (3072 * 8000) * (cycle_timer >> 25);
return ticks;
}
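For orientation (not in the patch): the Cycle Time register packs 12 bits of cycleOffset (ticks of the 24.576 MHz clock, 0..3071), 13 bits of cycleCount (8000 cycles per second) and 7 bits of cycleSeconds, which is where the 3072 and 3072 * 8000 factors come from. For example:

	cycle_timer_ticks((2 << 25) | (5 << 12) | 100)
		== 100 + 3072 * 5 + 24576000 * 2 == 49167460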
/*
* Some controllers exhibit one or more of the following bugs when updating the
* iso cycle timer register:
* - When the lowest six bits are wrapping around to zero, a read that happens
* at the same time will return garbage in the lowest ten bits.
* - When the cycleOffset field wraps around to zero, the cycleCount field is
* not incremented for about 60 ns.
* - Occasionally, the entire register reads zero.
*
* To catch these, we read the register three times and ensure that the
* difference between each two consecutive reads is approximately the same, i.e.
* less than twice the other. Furthermore, any negative difference indicates an
* error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
* execute, so we have enough precision to compute the ratio of the differences.)
*/
static u32 ohci_get_cycle_time(struct fw_card *card)
{
struct fw_ohci *ohci = fw_ohci(card);
u32 cycle_time;
u64 bus_time;
u32 c0, c1, c2;
u32 t0, t1, t2;
s32 diff01, diff12;
int i;
cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
bus_time = ((u64)atomic_read(&ohci->bus_seconds) << 32) | cycle_time;
c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
return bus_time;
if (ohci->quirks & QUIRK_CYCLE_TIMER) {
i = 0;
c1 = c2;
c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
do {
c0 = c1;
c1 = c2;
c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
t0 = cycle_timer_ticks(c0);
t1 = cycle_timer_ticks(c1);
t2 = cycle_timer_ticks(c2);
diff01 = t1 - t0;
diff12 = t2 - t1;
} while ((diff01 <= 0 || diff12 <= 0 ||
diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
&& i++ < 20);
}
return c2;
}
static void copy_iso_headers(struct iso_context *ctx, void *p)
@ -1828,52 +1870,6 @@ static void copy_iso_headers(struct iso_context *ctx, void *p)
ctx->header_length += ctx->base.header_size;
}
static int handle_ir_dualbuffer_packet(struct context *context,
struct descriptor *d,
struct descriptor *last)
{
struct iso_context *ctx =
container_of(context, struct iso_context, context);
struct db_descriptor *db = (struct db_descriptor *) d;
__le32 *ir_header;
size_t header_length;
void *p, *end;
if (db->first_res_count != 0 && db->second_res_count != 0) {
if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
/* This descriptor isn't done yet, stop iteration. */
return 0;
}
ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
}
header_length = le16_to_cpu(db->first_req_count) -
le16_to_cpu(db->first_res_count);
p = db + 1;
end = p + header_length;
while (p < end) {
copy_iso_headers(ctx, p);
ctx->excess_bytes +=
(le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
p += max(ctx->base.header_size, (size_t)8);
}
ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
le16_to_cpu(db->second_res_count);
if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
ir_header = (__le32 *) (db + 1);
ctx->base.callback(&ctx->base,
le32_to_cpu(ir_header[0]) & 0xffff,
ctx->header_length, ctx->header,
ctx->base.callback_data);
ctx->header_length = 0;
}
return 1;
}
static int handle_ir_packet_per_buffer(struct context *context,
struct descriptor *d,
struct descriptor *last)
@ -1960,10 +1956,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
channels = &ohci->ir_context_channels;
mask = &ohci->ir_context_mask;
list = ohci->ir_context_list;
if (ohci->use_dualbuffer)
callback = handle_ir_dualbuffer_packet;
else
callback = handle_ir_packet_per_buffer;
callback = handle_ir_packet_per_buffer;
}
spin_lock_irqsave(&ohci->lock, flags);
@ -2026,8 +2019,6 @@ static int ohci_start_iso(struct fw_iso_context *base,
} else {
index = ctx - ohci->ir_context_list;
control = IR_CONTEXT_ISOCH_HEADER;
if (ohci->use_dualbuffer)
control |= IR_CONTEXT_DUAL_BUFFER_MODE;
match = (tags << 28) | (sync << 8) | ctx->base.channel;
if (cycle >= 0) {
match |= (cycle & 0x07fff) << 12;
@ -2188,92 +2179,6 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
return 0;
}
static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
struct db_descriptor *db = NULL;
struct descriptor *d;
struct fw_iso_packet *p;
dma_addr_t d_bus, page_bus;
u32 z, header_z, length, rest;
int page, offset, packet_count, header_size;
/*
* FIXME: Cycle lost behavior should be configurable: lose
* packet, retransmit or terminate..
*/
p = packet;
z = 2;
/*
* The OHCI controller puts the isochronous header and trailer in the
* buffer, so we need at least 8 bytes.
*/
packet_count = p->header_length / ctx->base.header_size;
header_size = packet_count * max(ctx->base.header_size, (size_t)8);
/* Get header size in number of descriptors. */
header_z = DIV_ROUND_UP(header_size, sizeof(*d));
page = payload >> PAGE_SHIFT;
offset = payload & ~PAGE_MASK;
rest = p->payload_length;
/*
* The controllers I've tested have not worked correctly when
* second_req_count is zero. Rather than do something we know won't
* work, return an error
*/
if (rest == 0)
return -EINVAL;
while (rest > 0) {
d = context_get_descriptors(&ctx->context,
z + header_z, &d_bus);
if (d == NULL)
return -ENOMEM;
db = (struct db_descriptor *) d;
db->control = cpu_to_le16(DESCRIPTOR_STATUS |
DESCRIPTOR_BRANCH_ALWAYS);
db->first_size =
cpu_to_le16(max(ctx->base.header_size, (size_t)8));
if (p->skip && rest == p->payload_length) {
db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
db->first_req_count = db->first_size;
} else {
db->first_req_count = cpu_to_le16(header_size);
}
db->first_res_count = db->first_req_count;
db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
if (p->skip && rest == p->payload_length)
length = 4;
else if (offset + rest < PAGE_SIZE)
length = rest;
else
length = PAGE_SIZE - offset;
db->second_req_count = cpu_to_le16(length);
db->second_res_count = db->second_req_count;
page_bus = page_private(buffer->pages[page]);
db->second_buffer = cpu_to_le32(page_bus + offset);
if (p->interrupt && length == rest)
db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
context_append(&ctx->context, d, z, header_z);
offset = (offset + length) & ~PAGE_MASK;
rest -= length;
if (offset == 0)
page++;
}
return 0;
}
static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
@ -2364,9 +2269,6 @@ static int ohci_queue_iso(struct fw_iso_context *base,
spin_lock_irqsave(&ctx->context.ohci->lock, flags);
if (base->type == FW_ISO_CONTEXT_TRANSMIT)
ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
else if (ctx->context.ohci->use_dualbuffer)
ret = ohci_queue_iso_receive_dualbuffer(base, packet,
buffer, payload);
else
ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
buffer, payload);
@ -2383,7 +2285,7 @@ static const struct fw_card_driver ohci_driver = {
.send_response = ohci_send_response,
.cancel_packet = ohci_cancel_packet,
.enable_phys_dma = ohci_enable_phys_dma,
.get_bus_time = ohci_get_bus_time,
.get_cycle_time = ohci_get_cycle_time,
.allocate_iso_context = ohci_allocate_iso_context,
.free_iso_context = ohci_free_iso_context,
@ -2421,17 +2323,13 @@ static void ohci_pmac_off(struct pci_dev *dev)
#define ohci_pmac_off(dev)
#endif /* CONFIG_PPC_PMAC */
#define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT
#define PCI_DEVICE_ID_AGERE_FW643 0x5901
#define PCI_DEVICE_ID_TI_TSB43AB23 0x8024
static int __devinit pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
struct fw_ohci *ohci;
u32 bus_options, max_receive, link_speed, version;
u64 guid;
int err;
int i, err, n_ir, n_it;
size_t size;
ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
@ -2472,36 +2370,15 @@ static int __devinit pci_probe(struct pci_dev *dev,
goto fail_iomem;
}
version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
#if 0
/* FIXME: make it a context option or remove dual-buffer mode */
ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
#endif
/* dual-buffer mode is broken if more than one IR context is active */
if (dev->vendor == PCI_VENDOR_ID_AGERE &&
dev->device == PCI_DEVICE_ID_AGERE_FW643)
ohci->use_dualbuffer = false;
/* dual-buffer mode is broken */
if (dev->vendor == PCI_VENDOR_ID_RICOH &&
dev->device == PCI_DEVICE_ID_RICOH_R5C832)
ohci->use_dualbuffer = false;
/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
#if !defined(CONFIG_X86_32)
/* dual-buffer mode is broken with descriptor addresses above 2G */
if (dev->vendor == PCI_VENDOR_ID_TI &&
(dev->device == PCI_DEVICE_ID_TI_TSB43AB22 ||
dev->device == PCI_DEVICE_ID_TI_TSB43AB23))
ohci->use_dualbuffer = false;
#endif
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
#endif
ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
if (ohci_quirks[i].vendor == dev->vendor &&
(ohci_quirks[i].device == dev->device ||
ohci_quirks[i].device == (unsigned short)PCI_ANY_ID)) {
ohci->quirks = ohci_quirks[i].flags;
break;
}
if (param_quirks)
ohci->quirks = param_quirks;
ar_context_init(&ohci->ar_request_ctx, ohci,
OHCI1394_AsReqRcvContextControlSet);
@ -2516,17 +2393,19 @@ static int __devinit pci_probe(struct pci_dev *dev,
OHCI1394_AsRspTrContextControlSet, handle_at_packet);
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
ohci->ir_context_channels = ~0ULL;
ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
ohci->it_context_list = kzalloc(size, GFP_KERNEL);
n_ir = hweight32(ohci->ir_context_mask);
size = sizeof(struct iso_context) * n_ir;
ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
ohci->ir_context_channels = ~0ULL;
ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
n_it = hweight32(ohci->it_context_mask);
size = sizeof(struct iso_context) * n_it;
ohci->it_context_list = kzalloc(size, GFP_KERNEL);
if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
err = -ENOMEM;
@ -2553,8 +2432,11 @@ static int __devinit pci_probe(struct pci_dev *dev,
if (err)
goto fail_self_id;
fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
dev_name(&dev->dev), version >> 16, version & 0xff);
version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
"%d IR + %d IT contexts, quirks 0x%x\n",
dev_name(&dev->dev), version >> 16, version & 0xff,
n_ir, n_it, ohci->quirks);
return 0;
@ -2662,7 +2544,7 @@ static int pci_resume(struct pci_dev *dev)
}
#endif
static struct pci_device_id pci_table[] = {
static const struct pci_device_id pci_table[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
{ }
};

View file

@ -1014,7 +1014,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
return 0;
}
static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt,
const u32 *directory)
{
struct fw_csr_iterator ci;
int key, value;
@ -1027,7 +1028,7 @@ static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
return 0;
}
static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
static int sbp2_scan_unit_dir(struct sbp2_target *tgt, const u32 *directory,
u32 *model, u32 *firmware_revision)
{
struct fw_csr_iterator ci;

View file

@ -239,47 +239,18 @@ static const struct fw_address_region fcp_region = {
};
/* Adjust the template string if models with longer names appear. */
#define MAX_MODEL_NAME_LEN ((int)DIV_ROUND_UP(sizeof("FireDTV ????"), 4))
static size_t model_name(u32 *directory, __be32 *buffer)
{
struct fw_csr_iterator ci;
int i, length, key, value, last_key = 0;
u32 *block = NULL;
fw_csr_iterator_init(&ci, directory);
while (fw_csr_iterator_next(&ci, &key, &value)) {
if (last_key == CSR_MODEL &&
key == (CSR_DESCRIPTOR | CSR_LEAF))
block = ci.p - 1 + value;
last_key = key;
}
if (block == NULL)
return 0;
length = min((int)(block[0] >> 16) - 2, MAX_MODEL_NAME_LEN);
if (length <= 0)
return 0;
/* fast-forward to text string */
block += 3;
for (i = 0; i < length; i++)
buffer[i] = cpu_to_be32(block[i]);
return length * 4;
}
#define MAX_MODEL_NAME_LEN sizeof("FireDTV ????")
static int node_probe(struct device *dev)
{
struct firedtv *fdtv;
__be32 name[MAX_MODEL_NAME_LEN];
char name[MAX_MODEL_NAME_LEN];
int name_len, err;
name_len = model_name(fw_unit(dev)->directory, name);
name_len = fw_csr_string(fw_unit(dev)->directory, CSR_MODEL,
name, sizeof(name));
fdtv = fdtv_alloc(dev, &backend, (char *)name, name_len);
fdtv = fdtv_alloc(dev, &backend, name, name_len >= 0 ? name_len : 0);
if (!fdtv)
return -ENOMEM;

View file

@ -248,13 +248,20 @@ union fw_cdev_event {
#define FW_CDEV_IOC_SEND_BROADCAST_REQUEST _IOW('#', 0x12, struct fw_cdev_send_request)
#define FW_CDEV_IOC_SEND_STREAM_PACKET _IOW('#', 0x13, struct fw_cdev_send_stream_packet)
/* available since kernel version 2.6.34 */
#define FW_CDEV_IOC_GET_CYCLE_TIMER2 _IOWR('#', 0x14, struct fw_cdev_get_cycle_timer2)
/*
* FW_CDEV_VERSION History
* 1 (2.6.22) - initial version
* 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if
* &fw_cdev_create_iso_context.header_size is 8 or more
* (2.6.32) - added time stamp to xmit &fw_cdev_event_iso_interrupt
* (2.6.33) - IR has always packet-per-buffer semantics now, not one of
* dual-buffer or packet-per-buffer depending on hardware
* 3 (2.6.34) - made &fw_cdev_get_cycle_timer reliable
*/
#define FW_CDEV_VERSION 2
#define FW_CDEV_VERSION 3
/**
* struct fw_cdev_get_info - General purpose information ioctl
@ -544,20 +551,43 @@ struct fw_cdev_stop_iso {
/**
* struct fw_cdev_get_cycle_timer - read cycle timer register
* @local_time: system time, in microseconds since the Epoch
* @cycle_timer: isochronous cycle timer, as per OHCI 1.1 clause 5.13
* @cycle_timer: Cycle Time register contents
*
* The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer
* and also the system clock. This allows to express the receive time of an
* isochronous packet as a system time with microsecond accuracy.
* and also the system clock (%CLOCK_REALTIME). This allows to express the
* receive time of an isochronous packet as a system time.
*
* @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and
* 12 bits cycleOffset, in host byte order.
* 12 bits cycleOffset, in host byte order. Cf. the Cycle Time register
* per IEEE 1394 or Isochronous Cycle Timer register per OHCI-1394.
*
* In version 1 and 2 of the ABI, this ioctl returned unreliable (non-
* monotonic) @cycle_timer values on certain controllers.
*/
struct fw_cdev_get_cycle_timer {
__u64 local_time;
__u32 cycle_timer;
};
/**
* struct fw_cdev_get_cycle_timer2 - read cycle timer register
* @tv_sec: system time, seconds
* @tv_nsec: system time, sub-seconds part in nanoseconds
* @clk_id: input parameter, clock from which to get the system time
* @cycle_timer: Cycle Time register contents
*
* The %FW_CDEV_IOC_GET_CYCLE_TIMER2 works like
* %FW_CDEV_IOC_GET_CYCLE_TIMER but lets you choose a clock like with POSIX'
* clock_gettime function. Supported @clk_id values are POSIX' %CLOCK_REALTIME
* and %CLOCK_MONOTONIC and Linux' %CLOCK_MONOTONIC_RAW.
*/
struct fw_cdev_get_cycle_timer2 {
__s64 tv_sec;
__s32 tv_nsec;
__s32 clk_id;
__u32 cycle_timer;
};
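A minimal user-space sketch of calling the new ioctl might look as follows; the device path is illustrative and error handling is reduced to a perror().

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

int main(void)
{
	/* Ask for the cycle timer sampled together with CLOCK_MONOTONIC. */
	struct fw_cdev_get_cycle_timer2 ct = { .clk_id = CLOCK_MONOTONIC };
	int fd = open("/dev/fw0", O_RDWR);	/* illustrative device node */

	if (fd < 0 || ioctl(fd, FW_CDEV_IOC_GET_CYCLE_TIMER2, &ct) < 0) {
		perror("FW_CDEV_IOC_GET_CYCLE_TIMER2");
		return 1;
	}
	printf("system time %lld.%09d s, cycle timer %08x\n",
	       (long long)ct.tv_sec, (int)ct.tv_nsec, ct.cycle_timer);
	return 0;
}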
/**
* struct fw_cdev_allocate_iso_resource - (De)allocate a channel or bandwidth
* @closure: Passed back to userspace in correponding iso resource events

View file

@ -65,12 +65,13 @@
#define CSR_DIRECTORY_ID 0x20
struct fw_csr_iterator {
u32 *p;
u32 *end;
const u32 *p;
const u32 *end;
};
void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p);
int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
int fw_csr_string(const u32 *directory, int key, char *buf, size_t size);
extern struct bus_type fw_bus_type;
@ -162,7 +163,7 @@ struct fw_device {
struct mutex client_list_mutex;
struct list_head client_list;
u32 *config_rom;
const u32 *config_rom;
size_t config_rom_length;
int config_rom_retries;
unsigned is_local:1;
@ -204,7 +205,7 @@ int fw_device_enable_phys_dma(struct fw_device *device);
*/
struct fw_unit {
struct device device;
u32 *directory;
const u32 *directory;
struct fw_attribute_group attribute_group;
};

View file

@ -770,7 +770,6 @@
#define PCI_VENDOR_ID_TI 0x104c
#define PCI_DEVICE_ID_TI_TVP4020 0x3d07
#define PCI_DEVICE_ID_TI_4450 0x8011
#define PCI_DEVICE_ID_TI_TSB43AB22 0x8023
#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031
#define PCI_DEVICE_ID_TI_XX21_XX11_FM 0x8033
#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034