Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (48 commits)
  [SCSI] aacraid: do not set valid bit in sense information
  [SCSI] ses: add new Enclosure ULD
  [SCSI] enclosure: add support for enclosure services
  [SCSI] sr: fix test unit ready responses
  [SCSI] u14-34f: fix data direction bug
  [SCSI] aacraid: pci_set_dma_max_seg_size opened up for late model controllers
  [SCSI] fix BUG when sum(scatterlist) > bufflen
  [SCSI] arcmsr: updates (1.20.00.15)
  [SCSI] advansys: make 3 functions static
  [SCSI] Small cleanups for scsi_host.h
  [SCSI] dc395x: fix uninitialized var warning
  [SCSI] NCR53C9x: remove driver
  [SCSI] remove m68k NCR53C9x based drivers
  [SCSI] dec_esp: Remove driver
  [SCSI] kernel-doc: fix scsi docbook
  [SCSI] update my email address
  [SCSI] add protocol definitions
  [SCSI] sd: handle bad lba in sense information
  [SCSI] qla2xxx: Update version number to 8.02.00-k8.
  [SCSI] qla2xxx: Correct issue where incorrect init-fw mailbox command was used on non-NPIV capable ISPs.
  ...
This commit is contained in:
Linus Torvalds 2008-02-07 17:30:44 -08:00
commit c8b6de16d9
61 changed files with 2515 additions and 10055 deletions

View file

@ -12,7 +12,7 @@
<surname>Bottomley</surname>
<affiliation>
<address>
<email>James.Bottomley@steeleye.com</email>
<email>James.Bottomley@hansenpartnership.com</email>
</address>
</affiliation>
</author>

View file

@ -68,4 +68,45 @@
** 2. modify the arcmsr_pci_slot_reset function
** 3. modify the arcmsr_pci_ers_disconnect_forepart function
** 4. modify the arcmsr_pci_ers_need_reset_forepart function
** 1.20.00.15 09/27/2007 Erich Chen & Nick Cheng
** 1. add arcmsr_enable_eoi_mode() on adapter Type B
** 2. add readl(reg->iop2drv_doorbell_reg) in arcmsr_handle_hbb_isr()
** in case of the doorbell interrupt clearance is cached
** 1.20.00.15 10/01/2007 Erich Chen & Nick Cheng
** 1. modify acb->devstate[i][j]
** as ARECA_RAID_GOOD instead of
** ARECA_RAID_GONE in arcmsr_alloc_ccb_pool
** 1.20.00.15 11/06/2007 Erich Chen & Nick Cheng
** 1. add conditional declaration for
** arcmsr_pci_error_detected() and
** arcmsr_pci_slot_reset
** 1.20.00.15 11/23/2007 Erich Chen & Nick Cheng
** 1.check if the sg list member number
** exceeds arcmsr default limit in arcmsr_build_ccb()
** 2.change the returned value type of arcmsr_build_ccb()
** from "void" to "int"
** 3.add the conditional check if arcmsr_build_ccb()
** returns FAILED
** 1.20.00.15 12/04/2007 Erich Chen & Nick Cheng
** 1. modify arcmsr_drain_donequeue() to ignore unknown
** command and let kernel process command timeout.
** This could handle IO request violating max. segments
** while Linux XFS over DM-CRYPT.
** Thanks to Milan Broz's comments <mbroz@redhat.com>
** 1.20.00.15 12/24/2007 Erich Chen & Nick Cheng
** 1.fix the portability problems
** 2.fix type B where we should _not_ iounmap() acb->pmu;
** it's not ioremapped.
** 3.add return -ENOMEM if ioremap() fails
** 4.transfer IS_SG64_ADDR w/ cpu_to_le32()
** in arcmsr_build_ccb
** 5. modify acb->devstate[i][j] as ARECA_RAID_GONE instead of
** ARECA_RAID_GOOD in arcmsr_alloc_ccb_pool()
** 6.fix arcmsr_cdb->Context as (unsigned long)arcmsr_cdb
** 7.add the checking state of
** (outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT) == 0
** in arcmsr_handle_hba_isr
** 8.replace pci_alloc_consistent()/pci_free_consistent() with kmalloc()/kfree() in arcmsr_iop_message_xfer()
** 9. fix the release of dma memory for type B in arcmsr_free_ccb_pool()
** 10.fix the arcmsr_polling_hbb_ccbdone()
**************************************************************************

View file

@ -1407,7 +1407,7 @@ Credits
=======
The following people have contributed to this document:
Mike Anderson <andmike at us dot ibm dot com>
James Bottomley <James dot Bottomley at steeleye dot com>
James Bottomley <James dot Bottomley at hansenpartnership dot com>
Patrick Mansfield <patmans at us dot ibm dot com>
Christoph Hellwig <hch at infradead dot org>
Doug Ledford <dledford at redhat dot com>

View file

@ -285,4 +285,13 @@ config INTEL_MENLOW
If unsure, say N.
config ENCLOSURE_SERVICES
tristate "Enclosure Services"
default n
help
Provides support for intelligent enclosures (bays which
contain storage devices). You also need either a host
driver (SCSI/ATA) which supports enclosures
or a SCSI enclosure device (SES) to use these services.
endif # MISC_DEVICES

View file

@ -20,3 +20,4 @@ obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o

484
drivers/misc/enclosure.c Normal file
View file

@ -0,0 +1,484 @@
/*
* Enclosure Services
*
* Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
*
**-----------------------------------------------------------------------------
**
** This program is free software; you can redistribute it and/or
** modify it under the terms of the GNU General Public License
** version 2 as published by the Free Software Foundation.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
**-----------------------------------------------------------------------------
*/
#include <linux/device.h>
#include <linux/enclosure.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
/* All registered enclosures, protected by container_list_lock */
static LIST_HEAD(container_list);
static DEFINE_MUTEX(container_list_lock);
/* Defined with the rest of the sysfs plumbing near the bottom of the file */
static struct class enclosure_class;
static struct class enclosure_component_class;
/**
 * enclosure_find - find an enclosure given a device
 * @dev: the device to find for
 *
 * Walks the list of registered enclosures looking for one whose class
 * device is backed by @dev.  Returns NULL when nothing matches.  On a
 * match, a reference to the enclosure class device is taken; release
 * it with class_device_put().
 */
struct enclosure_device *enclosure_find(struct device *dev)
{
	struct enclosure_device *found = NULL;
	struct enclosure_device *iter;

	mutex_lock(&container_list_lock);
	list_for_each_entry(iter, &container_list, node) {
		if (iter->cdev.dev != dev)
			continue;
		class_device_get(&iter->cdev);
		found = iter;
		break;
	}
	mutex_unlock(&container_list_lock);

	return found;
}
EXPORT_SYMBOL_GPL(enclosure_find);
/**
 * enclosure_for_each_device - calls a function for each enclosure
 * @fn: the function to call
 * @data: the data to pass to each call
 *
 * Invokes @fn once per registered enclosure, stopping early (and
 * returning @fn's result) on the first non-zero return.
 *
 * Note, this function uses a mutex which will be held across calls to
 * @fn, so it must have non atomic context, and @fn may (although it
 * should not) sleep or otherwise cause the mutex to be held for
 * indefinite periods
 */
int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *),
			      void *data)
{
	struct enclosure_device *edev;
	int rc = 0;

	mutex_lock(&container_list_lock);
	list_for_each_entry(edev, &container_list, node) {
		rc = fn(edev, data);
		if (rc)
			break;
	}
	mutex_unlock(&container_list_lock);

	return rc;
}
EXPORT_SYMBOL_GPL(enclosure_for_each_device);
/**
 * enclosure_register - register device as an enclosure
 *
 * @dev: device containing the enclosure
 * @name: chosen name for the enclosure, shown in sysfs
 * @components: number of components in the enclosure
 * @cb: driver-supplied component callbacks (must not be NULL)
 *
 * This sets up the device for being an enclosure.  Note that @dev does
 * not have to be a dedicated enclosure device.  It may be some other type
 * of device that additionally responds to enclosure services.
 *
 * Returns the new enclosure device, or an ERR_PTR() value on failure.
 */
struct enclosure_device *
enclosure_register(struct device *dev, const char *name, int components,
		   struct enclosure_component_callbacks *cb)
{
	/* components are allocated as a trailing array in one block */
	struct enclosure_device *edev =
		kzalloc(sizeof(struct enclosure_device) +
			sizeof(struct enclosure_component)*components,
			GFP_KERNEL);
	int err, i;

	BUG_ON(!cb);

	if (!edev)
		return ERR_PTR(-ENOMEM);

	edev->components = components;
	edev->cdev.class = &enclosure_class;
	/* reference on @dev is dropped in enclosure_release() */
	edev->cdev.dev = get_device(dev);
	edev->cb = cb;
	snprintf(edev->cdev.class_id, BUS_ID_SIZE, "%s", name);
	err = class_device_register(&edev->cdev);
	if (err)
		goto err;

	/* number == -1 marks a component slot as unused; see
	 * enclosure_component_register() */
	for (i = 0; i < components; i++)
		edev->component[i].number = -1;

	mutex_lock(&container_list_lock);
	list_add_tail(&edev->node, &container_list);
	mutex_unlock(&container_list_lock);

	return edev;

 err:
	put_device(edev->cdev.dev);
	kfree(edev);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(enclosure_register);
/* All-NULL callback table swapped in at unregister time so any sysfs
 * accessors still in flight see no-op callbacks instead of calling into
 * a departing driver */
static struct enclosure_component_callbacks enclosure_null_callbacks;

/**
 * enclosure_unregister - remove an enclosure
 *
 * @edev: the registered enclosure to remove;
 */
void enclosure_unregister(struct enclosure_device *edev)
{
	int i;

	mutex_lock(&container_list_lock);
	list_del(&edev->node);
	mutex_unlock(&container_list_lock);

	/* only slots actually registered (number != -1) have class devices */
	for (i = 0; i < edev->components; i++)
		if (edev->component[i].number != -1)
			class_device_unregister(&edev->component[i].cdev);

	/* prevent any callbacks into service user */
	edev->cb = &enclosure_null_callbacks;
	class_device_unregister(&edev->cdev);
}
EXPORT_SYMBOL_GPL(enclosure_unregister);
/* class release for the enclosure class device: drops the reference on
 * the backing struct device taken in enclosure_register() and frees the
 * enclosure (components included, since they are allocated inline) */
static void enclosure_release(struct class_device *cdev)
{
	struct enclosure_device *edev = to_enclosure_device(cdev);

	put_device(cdev->dev);
	kfree(edev);
}

/* class release for a component class device: drops the reference on the
 * attached real device (if any) and on the parent enclosure class device
 * taken in enclosure_component_register().  Nothing is freed here because
 * the component is embedded in the enclosure_device allocation. */
static void enclosure_component_release(struct class_device *cdev)
{
	if (cdev->dev)
		put_device(cdev->dev);
	class_device_put(cdev->parent);
}
/**
 * enclosure_component_register - add a particular component to an enclosure
 * @edev: the enclosure to add the component
 * @number: the device number
 * @type: the type of component being added
 * @name: an optional name to appear in sysfs (leave NULL if none)
 *
 * Registers the component.  The name is optional for enclosures that
 * give their components a unique name.  If not, leave the field NULL
 * and a name will be assigned.
 *
 * Returns a pointer to the enclosure component, or an ERR_PTR() value
 * on failure.
 */
struct enclosure_component *
enclosure_component_register(struct enclosure_device *edev,
			     unsigned int number,
			     enum enclosure_component_type type,
			     const char *name)
{
	struct enclosure_component *ecomp;
	struct class_device *cdev;
	int err;

	if (number >= edev->components)
		return ERR_PTR(-EINVAL);

	ecomp = &edev->component[number];

	/* number != -1 means the slot is already in use */
	if (ecomp->number != -1)
		return ERR_PTR(-EINVAL);

	ecomp->type = type;
	ecomp->number = number;
	cdev = &ecomp->cdev;
	cdev->parent = class_device_get(&edev->cdev);
	cdev->class = &enclosure_component_class;
	if (name)
		snprintf(cdev->class_id, BUS_ID_SIZE, "%s", name);
	else
		snprintf(cdev->class_id, BUS_ID_SIZE, "%u", number);

	err = class_device_register(cdev);
	if (err) {
		/* Bug fix: the error was previously computed but never
		 * returned (bare "ERR_PTR(err);"), so callers received a
		 * component whose class device had failed to register.
		 * Mark the slot free again so enclosure_unregister() does
		 * not unregister a never-registered cdev, and propagate
		 * the error.
		 * NOTE(review): the cdev->parent reference taken above may
		 * still be leaked on this path -- confirm the failure
		 * semantics of class_device_register(). */
		ecomp->number = -1;
		return ERR_PTR(err);
	}

	return ecomp;
}
EXPORT_SYMBOL_GPL(enclosure_component_register);
/**
 * enclosure_add_device - add a device as being part of an enclosure
 * @edev: the enclosure device being added to.
 * @component: the number of the component
 * @dev: the device being added
 *
 * Declares a real device to reside in slot (or identifier) @component
 * of an enclosure.  This will cause the relevant sysfs links to appear.
 * This function may also be used to change a device associated with
 * an enclosure without having to call enclosure_remove_device() in
 * between.
 *
 * Returns zero on success or an error.
 */
int enclosure_add_device(struct enclosure_device *edev, int component,
			 struct device *dev)
{
	struct class_device *cdev;

	/* robustness fix: a negative @component previously slipped past the
	 * upper-bound check and indexed before the component array */
	if (!edev || component < 0 || component >= edev->components)
		return -EINVAL;

	cdev = &edev->component[component].cdev;

	/* del + add cycles the class device so its sysfs links are
	 * regenerated against the new backing device */
	class_device_del(cdev);
	if (cdev->dev)
		put_device(cdev->dev);
	cdev->dev = get_device(dev);
	return class_device_add(cdev);
}
EXPORT_SYMBOL_GPL(enclosure_add_device);
/**
 * enclosure_remove_device - remove a device from an enclosure
 * @edev: the enclosure device
 * @component: the number of the component to remove
 *
 * Detaches the real device from slot @component and re-adds the (now
 * empty) component class device so its sysfs links disappear.
 *
 * Returns zero on success or an error.
 */
int enclosure_remove_device(struct enclosure_device *edev, int component)
{
	struct class_device *cdev;

	/* robustness fix: a negative @component previously slipped past the
	 * upper-bound check and indexed before the component array */
	if (!edev || component < 0 || component >= edev->components)
		return -EINVAL;

	cdev = &edev->component[component].cdev;

	class_device_del(cdev);
	if (cdev->dev)
		put_device(cdev->dev);
	cdev->dev = NULL;
	return class_device_add(cdev);
}
EXPORT_SYMBOL_GPL(enclosure_remove_device);
/*
 * sysfs pieces below
 */

/* "components" attribute: number of component slots in the enclosure */
static ssize_t enclosure_show_components(struct class_device *cdev, char *buf)
{
	struct enclosure_device *edev = to_enclosure_device(cdev);

	return snprintf(buf, 40, "%d\n", edev->components);
}

static struct class_device_attribute enclosure_attrs[] = {
	__ATTR(components, S_IRUGO, enclosure_show_components, NULL),
	__ATTR_NULL
};

static struct class enclosure_class = {
	.name = "enclosure",
	.owner = THIS_MODULE,
	.release = enclosure_release,
	.class_dev_attrs = enclosure_attrs,
};
/* Human-readable names for enum enclosure_status values, indexed by the
 * enum.  NOTE(review): this table has no NULL sentinel, yet
 * set_component_status() scans it with "enclosure_status[i]" as the loop
 * condition -- looks like an out-of-bounds read; confirm and bound the
 * scan by the array size. */
static const char *const enclosure_status [] = {
	[ENCLOSURE_STATUS_UNSUPPORTED] = "unsupported",
	[ENCLOSURE_STATUS_OK] = "OK",
	[ENCLOSURE_STATUS_CRITICAL] = "critical",
	[ENCLOSURE_STATUS_NON_CRITICAL] = "non-critical",
	[ENCLOSURE_STATUS_UNRECOVERABLE] = "unrecoverable",
	[ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
	[ENCLOSURE_STATUS_UNKNOWN] = "unknown",
	[ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
};

/* Human-readable names for enum enclosure_component_type values */
static const char *const enclosure_type [] = {
	[ENCLOSURE_COMPONENT_DEVICE] = "device",
	[ENCLOSURE_COMPONENT_ARRAY_DEVICE] = "array device",
};
/* sysfs "fault" show: let the driver refresh the cached value first (if
 * it provides a get_fault callback), then report ecomp->fault */
static ssize_t get_component_fault(struct class_device *cdev, char *buf)
{
	struct enclosure_device *edev = to_enclosure_device(cdev->parent);
	struct enclosure_component *ecomp = to_enclosure_component(cdev);

	if (edev->cb->get_fault)
		edev->cb->get_fault(edev, ecomp);
	return snprintf(buf, 40, "%d\n", ecomp->fault);
}

/* sysfs "fault" store: forward the parsed value to the driver; silently
 * accepted (count returned) even when no set_fault callback exists */
static ssize_t set_component_fault(struct class_device *cdev, const char *buf,
				   size_t count)
{
	struct enclosure_device *edev = to_enclosure_device(cdev->parent);
	struct enclosure_component *ecomp = to_enclosure_component(cdev);
	/* base 0: accepts decimal, 0x-hex and 0-octal; bad input parses as 0 */
	int val = simple_strtoul(buf, NULL, 0);

	if (edev->cb->set_fault)
		edev->cb->set_fault(edev, ecomp, val);
	return count;
}
/* sysfs "status" show: refresh via the driver's get_status callback (if
 * any), then print the symbolic name for the cached ecomp->status */
static ssize_t get_component_status(struct class_device *cdev, char *buf)
{
	struct enclosure_device *edev = to_enclosure_device(cdev->parent);
	struct enclosure_component *ecomp = to_enclosure_component(cdev);

	if (edev->cb->get_status)
		edev->cb->get_status(edev, ecomp);
	return snprintf(buf, 40, "%s\n", enclosure_status[ecomp->status]);
}
/* sysfs "status" store: match the written string ("<name>" or "<name>\n")
 * against the enclosure_status table and pass the matching index to the
 * driver's set_status callback.  Returns -EINVAL for unknown strings or
 * when the driver has no set_status callback. */
static ssize_t set_component_status(struct class_device *cdev, const char *buf,
				    size_t count)
{
	struct enclosure_device *edev = to_enclosure_device(cdev->parent);
	struct enclosure_component *ecomp = to_enclosure_component(cdev);
	int i;

	/* Bug fix: the table has no NULL terminator (every slot is
	 * populated), so the old loop condition "enclosure_status[i]" read
	 * past the end of the array on non-matching input.  Bound the scan
	 * by the array size instead. */
	for (i = 0; i < ARRAY_SIZE(enclosure_status); i++) {
		if (strncmp(buf, enclosure_status[i],
			    strlen(enclosure_status[i])) == 0 &&
		    (buf[strlen(enclosure_status[i])] == '\n' ||
		     buf[strlen(enclosure_status[i])] == '\0'))
			break;
	}

	if (i < ARRAY_SIZE(enclosure_status) && edev->cb->set_status) {
		edev->cb->set_status(edev, ecomp, i);
		return count;
	} else
		return -EINVAL;
}
/* sysfs "active" show: refresh via driver callback (if any), then report
 * the cached ecomp->active */
static ssize_t get_component_active(struct class_device *cdev, char *buf)
{
	struct enclosure_device *edev = to_enclosure_device(cdev->parent);
	struct enclosure_component *ecomp = to_enclosure_component(cdev);

	if (edev->cb->get_active)
		edev->cb->get_active(edev, ecomp);
	return snprintf(buf, 40, "%d\n", ecomp->active);
}

/* sysfs "active" store: forward the parsed value to the driver; the write
 * is accepted (count returned) even when no set_active callback exists */
static ssize_t set_component_active(struct class_device *cdev, const char *buf,
				    size_t count)
{
	struct enclosure_device *edev = to_enclosure_device(cdev->parent);
	struct enclosure_component *ecomp = to_enclosure_component(cdev);
	/* base 0: accepts decimal, 0x-hex and 0-octal; bad input parses as 0 */
	int val = simple_strtoul(buf, NULL, 0);

	if (edev->cb->set_active)
		edev->cb->set_active(edev, ecomp, val);
	return count;
}
/* sysfs "locate" show: refresh via driver callback (if any), then report
 * the cached ecomp->locate */
static ssize_t get_component_locate(struct class_device *cdev, char *buf)
{
	struct enclosure_device *edev = to_enclosure_device(cdev->parent);
	struct enclosure_component *ecomp = to_enclosure_component(cdev);

	if (edev->cb->get_locate)
		edev->cb->get_locate(edev, ecomp);
	return snprintf(buf, 40, "%d\n", ecomp->locate);
}

/* sysfs "locate" store: forward the parsed value to the driver; the write
 * is accepted (count returned) even when no set_locate callback exists */
static ssize_t set_component_locate(struct class_device *cdev, const char *buf,
				    size_t count)
{
	struct enclosure_device *edev = to_enclosure_device(cdev->parent);
	struct enclosure_component *ecomp = to_enclosure_component(cdev);
	/* base 0: accepts decimal, 0x-hex and 0-octal; bad input parses as 0 */
	int val = simple_strtoul(buf, NULL, 0);

	if (edev->cb->set_locate)
		edev->cb->set_locate(edev, ecomp, val);
	return count;
}
/* sysfs "type" show: read-only symbolic name for the component type */
static ssize_t get_component_type(struct class_device *cdev, char *buf)
{
	struct enclosure_component *ecomp = to_enclosure_component(cdev);

	return snprintf(buf, 40, "%s\n", enclosure_type[ecomp->type]);
}

/* Per-component attributes: fault/status/active/locate are read-write
 * (root-writable), type is read-only */
static struct class_device_attribute enclosure_component_attrs[] = {
	__ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
	       set_component_fault),
	__ATTR(status, S_IRUGO | S_IWUSR, get_component_status,
	       set_component_status),
	__ATTR(active, S_IRUGO | S_IWUSR, get_component_active,
	       set_component_active),
	__ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate,
	       set_component_locate),
	__ATTR(type, S_IRUGO, get_component_type, NULL),
	__ATTR_NULL
};

static struct class enclosure_component_class = {
	.name = "enclosure_component",
	.owner = THIS_MODULE,
	.class_dev_attrs = enclosure_component_attrs,
	.release = enclosure_component_release,
};
/* Module init: register the enclosure class first, then the component
 * class; on component-class failure unwind the enclosure class. */
static int __init enclosure_init(void)
{
	int err = class_register(&enclosure_class);

	if (err)
		return err;

	err = class_register(&enclosure_component_class);
	if (err)
		class_unregister(&enclosure_class);

	return err;
}
/* Module exit: classes are unregistered in reverse registration order */
static void __exit enclosure_exit(void)
{
	class_unregister(&enclosure_component_class);
	class_unregister(&enclosure_class);
}

module_init(enclosure_init);
module_exit(enclosure_exit);

MODULE_AUTHOR("James Bottomley");
MODULE_DESCRIPTION("Enclosure Services");
MODULE_LICENSE("GPL v2");

View file

@ -179,7 +179,15 @@ config CHR_DEV_SCH
say M here and read <file:Documentation/kbuild/modules.txt> and
<file:Documentation/scsi/scsi.txt>. The module will be called ch.o.
If unsure, say N.
config SCSI_ENCLOSURE
tristate "SCSI Enclosure Support"
depends on SCSI && ENCLOSURE_SERVICES
help
Enclosures are devices sitting on or in SCSI backplanes that
manage devices. If you have a disk cage, the chances are that
it has an enclosure device. Selecting this option will just allow
certain enclosure conditions to be reported and is not required.
comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs"
depends on SCSI
@ -350,17 +358,6 @@ config SGIWD93_SCSI
If you have a Western Digital WD93 SCSI controller on
an SGI MIPS system, say Y. Otherwise, say N.
config SCSI_DECNCR
tristate "DEC NCR53C94 Scsi Driver"
depends on MACH_DECSTATION && SCSI && TC
help
Say Y here to support the NCR53C94 SCSI controller chips on IOASIC
based TURBOchannel DECstations and TURBOchannel PMAZ-A cards.
config SCSI_DECSII
tristate "DEC SII Scsi Driver"
depends on MACH_DECSTATION && SCSI && 32BIT
config BLK_DEV_3W_XXXX_RAID
tristate "3ware 5/6/7/8xxx ATA-RAID support"
depends on PCI && SCSI
@ -1263,17 +1260,6 @@ config SCSI_NCR53C8XX_NO_DISCONNECT
not allow targets to disconnect is not reasonable if there is more
than 1 device on a SCSI bus. The normal answer therefore is N.
config SCSI_MCA_53C9X
tristate "NCR MCA 53C9x SCSI support"
depends on MCA_LEGACY && SCSI && BROKEN_ON_SMP
help
Some MicroChannel machines, notably the NCR 35xx line, use a SCSI
controller based on the NCR 53C94. This driver will allow use of
the controller on the 3550, and very possibly others.
To compile this driver as a module, choose M here: the
module will be called mca_53c9x.
config SCSI_PAS16
tristate "PAS16 SCSI support"
depends on ISA && SCSI
@ -1600,45 +1586,6 @@ config GVP11_SCSI
To compile this driver as a module, choose M here: the
module will be called gvp11.
config CYBERSTORM_SCSI
tristate "CyberStorm SCSI support"
depends on ZORRO && SCSI
help
If you have an Amiga with an original (MkI) Phase5 Cyberstorm
accelerator board and the optional Cyberstorm SCSI controller,
answer Y. Otherwise, say N.
config CYBERSTORMII_SCSI
tristate "CyberStorm Mk II SCSI support"
depends on ZORRO && SCSI
help
If you have an Amiga with a Phase5 Cyberstorm MkII accelerator board
and the optional Cyberstorm SCSI controller, say Y. Otherwise,
answer N.
config BLZ2060_SCSI
tristate "Blizzard 2060 SCSI support"
depends on ZORRO && SCSI
help
If you have an Amiga with a Phase5 Blizzard 2060 accelerator board
and want to use the onboard SCSI controller, say Y. Otherwise,
answer N.
config BLZ1230_SCSI
tristate "Blizzard 1230IV/1260 SCSI support"
depends on ZORRO && SCSI
help
If you have an Amiga 1200 with a Phase5 Blizzard 1230IV or Blizzard
1260 accelerator, and the optional SCSI module, say Y. Otherwise,
say N.
config FASTLANE_SCSI
tristate "Fastlane SCSI support"
depends on ZORRO && SCSI
help
If you have the Phase5 Fastlane Z3 SCSI controller, or plan to use
one in the near future, say Y to this question. Otherwise, say N.
config SCSI_A4000T
tristate "A4000T NCR53c710 SCSI support (EXPERIMENTAL)"
depends on AMIGA && SCSI && EXPERIMENTAL
@ -1666,15 +1613,6 @@ config SCSI_ZORRO7XX
accelerator card for the Amiga 1200,
- the SCSI controller on the GVP Turbo 040/060 accelerator.
config OKTAGON_SCSI
tristate "BSC Oktagon SCSI support (EXPERIMENTAL)"
depends on ZORRO && SCSI && EXPERIMENTAL
help
If you have the BSC Oktagon SCSI disk controller for the Amiga, say
Y to this question. If you're in doubt about whether you have one,
see the picture at
<http://amiga.resource.cx/exp/search.pl?product=oktagon>.
config ATARI_SCSI
tristate "Atari native SCSI support"
depends on ATARI && SCSI
@ -1727,18 +1665,6 @@ config MAC_SCSI
SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
config SCSI_MAC_ESP
tristate "Macintosh NCR53c9[46] SCSI"
depends on MAC && SCSI
help
This is the NCR 53c9x SCSI controller found on most of the 68040
based Macintoshes. If you have one of these say Y and read the
SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
To compile this driver as a module, choose M here: the
module will be called mac_esp.
config MVME147_SCSI
bool "WD33C93 SCSI driver for MVME147"
depends on MVME147 && SCSI=y
@ -1779,6 +1705,7 @@ config SUN3_SCSI
config SUN3X_ESP
bool "Sun3x ESP SCSI"
depends on SUN3X && SCSI=y
select SCSI_SPI_ATTRS
help
The ESP was an on-board SCSI controller used on Sun 3/80
machines. Say Y here to compile in support for it.

View file

@ -44,15 +44,8 @@ obj-$(CONFIG_A2091_SCSI) += a2091.o wd33c93.o
obj-$(CONFIG_GVP11_SCSI) += gvp11.o wd33c93.o
obj-$(CONFIG_MVME147_SCSI) += mvme147.o wd33c93.o
obj-$(CONFIG_SGIWD93_SCSI) += sgiwd93.o wd33c93.o
obj-$(CONFIG_CYBERSTORM_SCSI) += NCR53C9x.o cyberstorm.o
obj-$(CONFIG_CYBERSTORMII_SCSI) += NCR53C9x.o cyberstormII.o
obj-$(CONFIG_BLZ2060_SCSI) += NCR53C9x.o blz2060.o
obj-$(CONFIG_BLZ1230_SCSI) += NCR53C9x.o blz1230.o
obj-$(CONFIG_FASTLANE_SCSI) += NCR53C9x.o fastlane.o
obj-$(CONFIG_OKTAGON_SCSI) += NCR53C9x.o oktagon_esp_mod.o
obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o
obj-$(CONFIG_MAC_SCSI) += mac_scsi.o
obj-$(CONFIG_SCSI_MAC_ESP) += mac_esp.o NCR53C9x.o
obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o
obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o
obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
@ -95,7 +88,6 @@ obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
obj-$(CONFIG_SCSI_EATA_PIO) += eata_pio.o
obj-$(CONFIG_SCSI_7000FASST) += wd7000.o
obj-$(CONFIG_SCSI_MCA_53C9X) += NCR53C9x.o mca_53c9x.o
obj-$(CONFIG_SCSI_IBMMCA) += ibmmca.o
obj-$(CONFIG_SCSI_EATA) += eata.o
obj-$(CONFIG_SCSI_DC395x) += dc395x.o
@ -112,13 +104,12 @@ obj-$(CONFIG_SCSI_QLOGICPTI) += qlogicpti.o
obj-$(CONFIG_BLK_DEV_IDESCSI) += ide-scsi.o
obj-$(CONFIG_SCSI_MESH) += mesh.o
obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o
obj-$(CONFIG_SCSI_DECNCR) += NCR53C9x.o dec_esp.o
obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o
obj-$(CONFIG_SCSI_PPA) += ppa.o
obj-$(CONFIG_SCSI_IMM) += imm.o
obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o
obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o
obj-$(CONFIG_SUN3X_ESP) += esp_scsi.o sun3x_esp.o
obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o
obj-$(CONFIG_SCSI_NSP32) += nsp32.o
@ -138,6 +129,7 @@ obj-$(CONFIG_BLK_DEV_SD) += sd_mod.o
obj-$(CONFIG_BLK_DEV_SR) += sr_mod.o
obj-$(CONFIG_CHR_DEV_SG) += sg.o
obj-$(CONFIG_CHR_DEV_SCH) += ch.o
obj-$(CONFIG_SCSI_ENCLOSURE) += ses.o
# This goes last, so that "real" scsi devices probe earlier
obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o

File diff suppressed because it is too large Load diff

View file

@ -1,668 +0,0 @@
/* NCR53C9x.c: Defines and structures for the NCR53C9x generic driver.
*
* Originally esp.h: Defines and structures for the Sparc ESP
* (Enhanced SCSI Processor) driver under Linux.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*
* Generalization by Jesper Skov (jskov@cygnus.co.uk)
*
* More generalization (for i386 stuff) by Tymm Twillman (tymm@computer.org)
*/
#ifndef NCR53C9X_H
#define NCR53C9X_H
#include <linux/interrupt.h>
/* djweis for mac driver */
#if defined(CONFIG_MAC)
#define PAD_SIZE 15
#else
#define PAD_SIZE 3
#endif
/* Handle multiple hostadapters on Amiga
* generally PAD_SIZE = 3
* but there is one exception: Oktagon (PAD_SIZE = 1) */
#if defined(CONFIG_OKTAGON_SCSI) || defined(CONFIG_OKTAGON_SCSI_MODULE)
#undef PAD_SIZE
#if defined(CONFIG_BLZ1230_SCSI) || defined(CONFIG_BLZ1230_SCSI_MODULE) || \
defined(CONFIG_BLZ2060_SCSI) || defined(CONFIG_BLZ2060_SCSI_MODULE) || \
defined(CONFIG_CYBERSTORM_SCSI) || defined(CONFIG_CYBERSTORM_SCSI_MODULE) || \
defined(CONFIG_CYBERSTORMII_SCSI) || defined(CONFIG_CYBERSTORMII_SCSI_MODULE) || \
defined(CONFIG_FASTLANE_SCSI) || defined(CONFIG_FASTLANE_SCSI_MODULE)
#define MULTIPLE_PAD_SIZES
#else
#define PAD_SIZE 1
#endif
#endif
/* Macros for debugging messages */
#define DEBUG_ESP
/* #define DEBUG_ESP_DATA */
/* #define DEBUG_ESP_QUEUE */
/* #define DEBUG_ESP_DISCONNECT */
/* #define DEBUG_ESP_STATUS */
/* #define DEBUG_ESP_PHASES */
/* #define DEBUG_ESP_WORKBUS */
/* #define DEBUG_STATE_MACHINE */
/* #define DEBUG_ESP_CMDS */
/* #define DEBUG_ESP_IRQS */
/* #define DEBUG_SDTR */
/* #define DEBUG_ESP_SG */
/* Use the following to sprinkle debugging messages in a way which
* suits you if combinations of the above become too verbose when
* trying to track down a specific problem.
*/
/* #define DEBUG_ESP_MISC */
#if defined(DEBUG_ESP)
#define ESPLOG(foo) printk foo
#else
#define ESPLOG(foo)
#endif /* (DEBUG_ESP) */
#if defined(DEBUG_ESP_DATA)
#define ESPDATA(foo) printk foo
#else
#define ESPDATA(foo)
#endif
#if defined(DEBUG_ESP_QUEUE)
#define ESPQUEUE(foo) printk foo
#else
#define ESPQUEUE(foo)
#endif
#if defined(DEBUG_ESP_DISCONNECT)
#define ESPDISC(foo) printk foo
#else
#define ESPDISC(foo)
#endif
#if defined(DEBUG_ESP_STATUS)
#define ESPSTAT(foo) printk foo
#else
#define ESPSTAT(foo)
#endif
#if defined(DEBUG_ESP_PHASES)
#define ESPPHASE(foo) printk foo
#else
#define ESPPHASE(foo)
#endif
#if defined(DEBUG_ESP_WORKBUS)
#define ESPBUS(foo) printk foo
#else
#define ESPBUS(foo)
#endif
#if defined(DEBUG_ESP_IRQS)
#define ESPIRQ(foo) printk foo
#else
#define ESPIRQ(foo)
#endif
#if defined(DEBUG_SDTR)
#define ESPSDTR(foo) printk foo
#else
#define ESPSDTR(foo)
#endif
#if defined(DEBUG_ESP_MISC)
#define ESPMISC(foo) printk foo
#else
#define ESPMISC(foo)
#endif
/*
* padding for register structure
*/
#ifdef CONFIG_JAZZ_ESP
#define EREGS_PAD(n)
#else
#ifndef MULTIPLE_PAD_SIZES
#define EREGS_PAD(n) unchar n[PAD_SIZE];
#endif
#endif
/* The ESP SCSI controllers have their register sets in three
* "classes":
*
* 1) Registers which are both read and write.
* 2) Registers which are read only.
* 3) Registers which are write only.
*
* Yet, they all live within the same IO space.
*/
#if !defined(__i386__) && !defined(__x86_64__)
#ifndef MULTIPLE_PAD_SIZES
#ifdef CONFIG_CPU_HAS_WB
#include <asm/wbflush.h>
#define esp_write(__reg, __val) do{(__reg) = (__val); wbflush();} while(0)
#else
#define esp_write(__reg, __val) ((__reg) = (__val))
#endif
#define esp_read(__reg) (__reg)
struct ESP_regs {
/* Access Description Offset */
volatile unchar esp_tclow; /* rw Low bits of the transfer count 0x00 */
EREGS_PAD(tlpad1);
volatile unchar esp_tcmed; /* rw Mid bits of the transfer count 0x04 */
EREGS_PAD(fdpad);
volatile unchar esp_fdata; /* rw FIFO data bits 0x08 */
EREGS_PAD(cbpad);
volatile unchar esp_cmnd; /* rw SCSI command bits 0x0c */
EREGS_PAD(stpad);
volatile unchar esp_status; /* ro ESP status register 0x10 */
#define esp_busid esp_status /* wo Bus ID for select/reselect 0x10 */
EREGS_PAD(irqpd);
volatile unchar esp_intrpt; /* ro Kind of interrupt 0x14 */
#define esp_timeo esp_intrpt /* wo Timeout value for select/resel 0x14 */
EREGS_PAD(sspad);
volatile unchar esp_sstep; /* ro Sequence step register 0x18 */
#define esp_stp esp_sstep /* wo Transfer period per sync 0x18 */
EREGS_PAD(ffpad);
volatile unchar esp_fflags; /* ro Bits of current FIFO info 0x1c */
#define esp_soff esp_fflags /* wo Sync offset 0x1c */
EREGS_PAD(cf1pd);
volatile unchar esp_cfg1; /* rw First configuration register 0x20 */
EREGS_PAD(cfpad);
volatile unchar esp_cfact; /* wo Clock conversion factor 0x24 */
EREGS_PAD(ctpad);
volatile unchar esp_ctest; /* wo Chip test register 0x28 */
EREGS_PAD(cf2pd);
volatile unchar esp_cfg2; /* rw Second configuration register 0x2c */
EREGS_PAD(cf3pd);
/* The following is only found on the 53C9X series SCSI chips */
volatile unchar esp_cfg3; /* rw Third configuration register 0x30 */
EREGS_PAD(cf4pd);
volatile unchar esp_cfg4; /* rw Fourth configuration register 0x34 */
EREGS_PAD(thpd);
/* The following is found on all chips except the NCR53C90 (ESP100) */
volatile unchar esp_tchi; /* rw High bits of transfer count 0x38 */
#define esp_uid esp_tchi /* ro Unique ID code 0x38 */
EREGS_PAD(fgpad);
volatile unchar esp_fgrnd; /* rw Data base for fifo 0x3c */
};
#else /* MULTIPLE_PAD_SIZES */
#define esp_write(__reg, __val) (*(__reg) = (__val))
#define esp_read(__reg) (*(__reg))
struct ESP_regs {
unsigned char io_addr[64]; /* dummy */
/* Access Description Offset */
#define esp_tclow io_addr /* rw Low bits of the transfer count 0x00 */
#define esp_tcmed io_addr + (1<<(esp->shift)) /* rw Mid bits of the transfer count 0x04 */
#define esp_fdata io_addr + (2<<(esp->shift)) /* rw FIFO data bits 0x08 */
#define esp_cmnd io_addr + (3<<(esp->shift)) /* rw SCSI command bits 0x0c */
#define esp_status io_addr + (4<<(esp->shift)) /* ro ESP status register 0x10 */
#define esp_busid esp_status /* wo Bus ID for select/reselect 0x10 */
#define esp_intrpt io_addr + (5<<(esp->shift)) /* ro Kind of interrupt 0x14 */
#define esp_timeo esp_intrpt /* wo Timeout value for select/resel 0x14 */
#define esp_sstep io_addr + (6<<(esp->shift)) /* ro Sequence step register 0x18 */
#define esp_stp esp_sstep /* wo Transfer period per sync 0x18 */
#define esp_fflags io_addr + (7<<(esp->shift)) /* ro Bits of current FIFO info 0x1c */
#define esp_soff esp_fflags /* wo Sync offset 0x1c */
#define esp_cfg1 io_addr + (8<<(esp->shift)) /* rw First configuration register 0x20 */
#define esp_cfact io_addr + (9<<(esp->shift)) /* wo Clock conversion factor 0x24 */
#define esp_ctest io_addr + (10<<(esp->shift)) /* wo Chip test register 0x28 */
#define esp_cfg2 io_addr + (11<<(esp->shift)) /* rw Second configuration register 0x2c */
/* The following is only found on the 53C9X series SCSI chips */
#define esp_cfg3 io_addr + (12<<(esp->shift)) /* rw Third configuration register 0x30 */
#define esp_cfg4 io_addr + (13<<(esp->shift)) /* rw Fourth configuration register 0x34 */
/* The following is found on all chips except the NCR53C90 (ESP100) */
#define esp_tchi io_addr + (14<<(esp->shift)) /* rw High bits of transfer count 0x38 */
#define esp_uid esp_tchi /* ro Unique ID code 0x38 */
#define esp_fgrnd io_addr + (15<<(esp->shift)) /* rw Data base for fifo 0x3c */
};
#endif
#else /* !defined(__i386__) && !defined(__x86_64__) */
/* Port-I/O variant: register access goes through x86 in/out instructions. */
#define esp_write(__reg, __val) outb((__val), (__reg))
#define esp_read(__reg) inb((__reg))
/* ESP register file, I/O-port flavour.  io_addr is the base port number;
 * registers are one port apart, and each esp_* macro below is a port offset
 * from io_addr rather than a real struct member.  As in the memory-mapped
 * variant, several offsets are shared between a read-only and a write-only
 * register (e.g. esp_status / esp_busid at 0x10).
 */
struct ESP_regs {
unsigned int io_addr;
/* Access Description Offset */
#define esp_tclow io_addr /* rw Low bits of the transfer count 0x00 */
#define esp_tcmed io_addr + 1 /* rw Mid bits of the transfer count 0x04 */
#define esp_fdata io_addr + 2 /* rw FIFO data bits 0x08 */
#define esp_cmnd io_addr + 3 /* rw SCSI command bits 0x0c */
#define esp_status io_addr + 4 /* ro ESP status register 0x10 */
#define esp_busid esp_status /* wo Bus ID for select/reselect 0x10 */
#define esp_intrpt io_addr + 5 /* ro Kind of interrupt 0x14 */
#define esp_timeo esp_intrpt /* wo Timeout value for select/resel 0x14 */
#define esp_sstep io_addr + 6 /* ro Sequence step register 0x18 */
#define esp_stp esp_sstep /* wo Transfer period per sync 0x18 */
#define esp_fflags io_addr + 7 /* ro Bits of current FIFO info 0x1c */
#define esp_soff esp_fflags /* wo Sync offset 0x1c */
#define esp_cfg1 io_addr + 8 /* rw First configuration register 0x20 */
#define esp_cfact io_addr + 9 /* wo Clock conversion factor 0x24 */
#define esp_ctest io_addr + 10 /* wo Chip test register 0x28 */
#define esp_cfg2 io_addr + 11 /* rw Second configuration register 0x2c */
/* The following is only found on the 53C9X series SCSI chips */
#define esp_cfg3 io_addr + 12 /* rw Third configuration register 0x30 */
#define esp_cfg4 io_addr + 13 /* rw Fourth configuration register 0x34 */
/* The following is found on all chips except the NCR53C90 (ESP100) */
#define esp_tchi io_addr + 14 /* rw High bits of transfer count 0x38 */
#define esp_uid esp_tchi /* ro Unique ID code 0x38 */
#define esp_fgrnd io_addr + 15 /* rw Data base for fifo 0x3c */
};
#endif /* !defined(__i386__) && !defined(__x86_64__) */
/* Various revisions of the ESP board.  These IDs select per-chip feature
 * handling throughout the driver (config registers 2/3/4, FAST SCSI, etc.);
 * the numeric values are driver-internal, not hardware-defined.
 */
enum esp_rev {
esp100 = 0x00, /* NCR53C90 - very broken */
esp100a = 0x01, /* NCR53C90A */
esp236 = 0x02,
fas236 = 0x03,
fas100a = 0x04,
fast = 0x05,
fas366 = 0x06,
fas216 = 0x07,
fsc = 0x08, /* SYM53C94-2 */
espunknown = 0x09 /* probe could not identify the chip */
};
/* We allocate one of these for each scsi device and attach it to
 * SDptr->hostdata for use in the driver.  Holds the per-target transfer
 * negotiation state.
 */
struct esp_device {
unsigned char sync_min_period; /* negotiated minimum sync transfer period */
unsigned char sync_max_offset; /* negotiated maximum sync offset */
unsigned sync:1; /* synchronous transfers negotiated/enabled */
unsigned wide:1; /* wide transfers negotiated/enabled */
unsigned disconnect:1; /* target may disconnect from the bus */
};
/* We get one of these for each ESP probed.
 *
 * This is the per-host-adapter state: register pointers, DMA callback
 * table supplied by the board-specific front-end driver, negotiation
 * state, command queues and interrupt bookkeeping.  Field order matters
 * to the rest of the driver; do not reorder.
 */
struct NCR_ESP {
struct NCR_ESP *next; /* Next ESP on probed or NULL */
struct ESP_regs *eregs; /* All esp registers */
int dma; /* Who I do transfers with. */
void *dregs; /* And his registers. */
struct Scsi_Host *ehost; /* Backpointer to SCSI Host */
void *edev; /* Pointer to controller base/SBus */
int esp_id; /* Unique per-ESP ID number */
/* ESP Configuration Registers */
unsigned char config1; /* Copy of the 1st config register */
unsigned char config2; /* Copy of the 2nd config register */
unsigned char config3[16]; /* Copy of the 3rd config register */
/* The current command we are sending to the ESP chip. This esp_command
 * ptr needs to be mapped in DVMA area so we can send commands and read
 * from the ESP fifo without burning precious CPU cycles. Programmed I/O
 * sucks when we have the DVMA to do it for us. The ESP is stupid and will
 * only send out 6, 10, and 12 byte SCSI commands, others we need to send
 * one byte at a time. esp_slowcmd being set says that we are doing one
 * of the command types ESP doesn't understand, esp_scmdp keeps track of
 * which byte we are sending, esp_scmdleft says how many bytes to go.
 */
volatile unchar *esp_command; /* Location of command (CPU view) */
__u32 esp_command_dvma; /* Location of command (DVMA view) */
unsigned char esp_clen; /* Length of this command */
unsigned char esp_slowcmd; /* Non-zero: sending byte-at-a-time (see above) */
unsigned char *esp_scmdp; /* Next byte of a slow command to send */
unsigned char esp_scmdleft; /* Remaining bytes of a slow command */
/* The following are used to determine the cause of an IRQ. Upon every
 * IRQ entry we synchronize these with the hardware registers.
 */
unchar ireg; /* Copy of ESP interrupt register */
unchar sreg; /* Same for ESP status register */
unchar seqreg; /* The ESP sequence register */
/* The following is set when a premature interrupt condition is detected
 * in some FAS revisions.
 */
unchar fas_premature_intr_workaround;
/* To save register writes to the ESP, which can be expensive, we
 * keep track of the previous value that various registers had for
 * the last target we connected to. If they are the same for the
 * current target, we skip the register writes as they are not needed.
 */
unchar prev_soff, prev_stp, prev_cfg3;
/* For each target we keep track of save/restore data
 * pointer information. This needs to be updated majorly
 * when we add support for tagged queueing. -DaveM
 */
struct esp_pointers {
char *saved_ptr;
struct scatterlist *saved_buffer;
int saved_this_residual;
int saved_buffers_residual;
} data_pointers[16] /*XXX [MAX_TAGS_PER_TARGET]*/; /* indexed by target ID */
/* Clock periods, frequencies, synchronization, etc. */
unsigned int cfreq; /* Clock frequency in HZ */
unsigned int cfact; /* Clock conversion factor */
unsigned int ccycle; /* One ESP clock cycle */
unsigned int ctick; /* One ESP clock time */
unsigned int radelay; /* FAST chip req/ack delay */
unsigned int neg_defp; /* Default negotiation period */
unsigned int sync_defp; /* Default sync transfer period */
unsigned int max_period; /* longest our period can be */
unsigned int min_period; /* shortest period we can withstand */
/* For slow to medium speed input clock rates we shoot for 5mb/s,
 * but for high input clock rates we try to do 10mb/s although I
 * don't think a transfer can even run that fast with an ESP even
 * with DMA2 scatter gather pipelining.
 */
#define SYNC_DEFP_SLOW 0x32 /* 5mb/s */
#define SYNC_DEFP_FAST 0x19 /* 10mb/s */
unsigned int snip; /* Sync. negotiation in progress */
unsigned int wnip; /* WIDE negotiation in progress */
unsigned int targets_present; /* targets spoken to before */
int current_transfer_size; /* Set at beginning of data dma */
unchar espcmdlog[32]; /* Log of current esp cmds sent. */
unchar espcmdent; /* Current entry in esp cmd log. */
/* Misc. info about this ESP */
enum esp_rev erev; /* ESP revision */
int irq; /* IRQ for this ESP */
int scsi_id; /* Who am I as initiator? */
int scsi_id_mask; /* Bitmask of 'me'. */
int diff; /* Differential SCSI bus? */
int slot; /* Slot the adapter occupies */
/* Our command queues, only one cmd lives in the current_SC queue. */
Scsi_Cmnd *issue_SC; /* Commands to be issued */
Scsi_Cmnd *current_SC; /* Who is currently working the bus */
Scsi_Cmnd *disconnected_SC; /* Commands disconnected from the bus */
/* Message goo */
unchar cur_msgout[16];
unchar cur_msgin[16];
unchar prevmsgout, prevmsgin;
unchar msgout_len, msgin_len;
unchar msgout_ctr, msgin_ctr;
/* States that we cannot keep in the per cmd structure because they
 * cannot be assosciated with any specific command.
 */
unchar resetting_bus;
wait_queue_head_t reset_queue;
unchar do_pio_cmds; /* Do command transfer with pio */
/* How much bits do we have to shift the registers */
unsigned char shift;
/* Functions handling DMA
 * Supplied by the board-specific front-end at esp_allocate() time.
 */
/* Required functions */
int (*dma_bytes_sent)(struct NCR_ESP *, int);
int (*dma_can_transfer)(struct NCR_ESP *, Scsi_Cmnd *);
void (*dma_dump_state)(struct NCR_ESP *);
void (*dma_init_read)(struct NCR_ESP *, __u32, int);
void (*dma_init_write)(struct NCR_ESP *, __u32, int);
void (*dma_ints_off)(struct NCR_ESP *);
void (*dma_ints_on)(struct NCR_ESP *);
int (*dma_irq_p)(struct NCR_ESP *);
int (*dma_ports_p)(struct NCR_ESP *);
void (*dma_setup)(struct NCR_ESP *, __u32, int, int);
/* Optional functions (i.e. may be initialized to 0) */
void (*dma_barrier)(struct NCR_ESP *);
void (*dma_drain)(struct NCR_ESP *);
void (*dma_invalidate)(struct NCR_ESP *);
void (*dma_irq_entry)(struct NCR_ESP *);
void (*dma_irq_exit)(struct NCR_ESP *);
void (*dma_led_off)(struct NCR_ESP *);
void (*dma_led_on)(struct NCR_ESP *);
void (*dma_poll)(struct NCR_ESP *, unsigned char *);
void (*dma_reset)(struct NCR_ESP *);
/* Optional virtual DMA functions */
void (*dma_mmu_get_scsi_one)(struct NCR_ESP *, Scsi_Cmnd *);
void (*dma_mmu_get_scsi_sgl)(struct NCR_ESP *, Scsi_Cmnd *);
void (*dma_mmu_release_scsi_one)(struct NCR_ESP *, Scsi_Cmnd *);
void (*dma_mmu_release_scsi_sgl)(struct NCR_ESP *, Scsi_Cmnd *);
void (*dma_advance_sg)(Scsi_Cmnd *);
};
/* Bitfield meanings for the above registers.
 * Note: many bit positions are overloaded — the same bit means different
 * things on different chip revisions (see enum esp_rev); the parenthesised
 * chip list in each comment says which revisions the meaning applies to.
 */
/* ESP config reg 1, read-write, found on all ESP chips */
#define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */
#define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */
#define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */
#define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */
#define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */
#define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */
/* ESP config reg 2, read-write, found only on esp100a+esp200+esp236+fsc chips */
#define ESP_CONFIG2_DMAPARITY 0x01 /* enable DMA Parity (200,236,fsc) */
#define ESP_CONFIG2_REGPARITY 0x02 /* enable reg Parity (200,236,fsc) */
#define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */
#define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features (tmode only) */
#define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */
#define ESP_CONFIG2_HMEFENAB 0x10 /* HME features enable */
#define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236,fsc) */
#define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,esp216,fsc) */
#define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (esp236) */
#define ESP_CONFIG2_RFB 0x80 /* Reserve FIFO byte (fsc) */
#define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... */
/* ESP config register 3 read-write, found only esp236+fas236+fas100a+fsc chips */
#define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a/fas366) */
#define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236/fsc) */
#define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a) */
#define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236/fsc) */
#define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a) */
#define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236/fsc) */
#define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a) */
#define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236/fsc) */
#define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a) */
#define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236/fsc) */
#define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236/fsc) */
#define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236/fsc) */
#define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236/fsc) */
/* ESP config register 4 read-write, found only on fsc chips */
#define ESP_CONFIG4_BBTE 0x01 /* Back-to-Back transfer enable */
#define ESP_CONFIG4_TEST 0x02 /* Transfer counter test mode */
#define ESP_CONFIG4_EAN 0x04 /* Enable Active Negotiation */
/* ESP command register read-write.
 * Commands are grouped by the chip state they require; ESP_CMD_DMA may be
 * OR'ed into any command to have it execute as a DMA transfer.
 */
/* Group 1 commands: These may be sent at any point in time to the ESP
 * chip. None of them can generate interrupts 'cept
 * the "SCSI bus reset" command if you have not disabled
 * SCSI reset interrupts in the config1 ESP register.
 */
#define ESP_CMD_NULL 0x00 /* Null command, ie. a nop */
#define ESP_CMD_FLUSH 0x01 /* FIFO Flush */
#define ESP_CMD_RC 0x02 /* Chip reset */
#define ESP_CMD_RS 0x03 /* SCSI bus reset */
/* Group 2 commands: ESP must be an initiator and connected to a target
 * for these commands to work.
 */
#define ESP_CMD_TI 0x10 /* Transfer Information */
#define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */
#define ESP_CMD_MOK 0x12 /* Message okie-dokie */
#define ESP_CMD_TPAD 0x18 /* Transfer Pad */
#define ESP_CMD_SATN 0x1a /* Set ATN */
#define ESP_CMD_RATN 0x1b /* De-assert ATN */
/* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected
 * to a target as the initiator for these commands to work.
 */
#define ESP_CMD_SMSG 0x20 /* Send message */
#define ESP_CMD_SSTAT 0x21 /* Send status */
#define ESP_CMD_SDATA 0x22 /* Send data */
#define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */
#define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */
#define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */
#define ESP_CMD_DCNCT 0x27 /* Disconnect */
#define ESP_CMD_RMSG 0x28 /* Receive Message */
#define ESP_CMD_RCMD 0x29 /* Receive Command */
#define ESP_CMD_RDATA 0x2a /* Receive Data */
#define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */
/* Group 4 commands: The ESP must be in the disconnected state and must
 * not be connected to any targets as initiator for
 * these commands to work.
 */
#define ESP_CMD_RSEL 0x40 /* Reselect */
#define ESP_CMD_SEL 0x41 /* Select w/o ATN */
#define ESP_CMD_SELA 0x42 /* Select w/ATN */
#define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */
#define ESP_CMD_ESEL 0x44 /* Enable selection */
#define ESP_CMD_DSEL 0x45 /* Disable selections */
#define ESP_CMD_SA3 0x46 /* Select w/ATN3 */
#define ESP_CMD_RSEL3 0x47 /* Reselect3 */
/* This bit enables the ESP's DMA */
#define ESP_CMD_DMA 0x80 /* Do DMA? */
/* ESP status register read-only */
#define ESP_STAT_PIO 0x01 /* IO phase bit */
#define ESP_STAT_PCD 0x02 /* CD phase bit */
#define ESP_STAT_PMSG 0x04 /* MSG phase bit */
#define ESP_STAT_PMASK 0x07 /* Mask of phase bits */
#define ESP_STAT_TDONE 0x08 /* Transfer Completed */
#define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */
#define ESP_STAT_PERR 0x20 /* Parity error */
#define ESP_STAT_SPAM 0x40 /* Real bad error */
/* This indicates the 'interrupt pending' condition, it is a reserved
 * bit on old revs of the ESP (ESP100, ESP100A, FAS100A).
 */
#define ESP_STAT_INTR 0x80 /* Interrupt */
/* The status register can be masked with ESP_STAT_PMASK and compared
 * with the following values to determine the current phase the ESP
 * (at least thinks it) is in. For our purposes we also add our own
 * software 'done' bit for our phase management engine.
 */
#define ESP_DOP (0) /* Data Out */
#define ESP_DIP (ESP_STAT_PIO) /* Data In */
#define ESP_CMDP (ESP_STAT_PCD) /* Command */
#define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */
#define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */
#define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */
/* ESP interrupt register read-only */
#define ESP_INTR_S 0x01 /* Select w/o ATN */
#define ESP_INTR_SATN 0x02 /* Select w/ATN */
#define ESP_INTR_RSEL 0x04 /* Reselected */
#define ESP_INTR_FDONE 0x08 /* Function done */
#define ESP_INTR_BSERV 0x10 /* Bus service */
#define ESP_INTR_DC 0x20 /* Disconnect */
#define ESP_INTR_IC 0x40 /* Illegal command given */
#define ESP_INTR_SR 0x80 /* SCSI bus reset detected */
/* Interrupt status macros.
 * NOTE(review): these macros reference (esp)->intreg, but struct NCR_ESP
 * declares the interrupt-register shadow as 'ireg'.  They would fail to
 * compile if actually expanded — presumably they are unused; confirm
 * before relying on them.
 */
#define ESP_SRESET_IRQ(esp) ((esp)->intreg & (ESP_INTR_SR))
#define ESP_ILLCMD_IRQ(esp) ((esp)->intreg & (ESP_INTR_IC))
#define ESP_SELECT_WITH_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_SATN))
#define ESP_SELECT_WITHOUT_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_S))
#define ESP_SELECTION_IRQ(esp) ((ESP_SELECT_WITH_ATN_IRQ(esp)) || \
(ESP_SELECT_WITHOUT_ATN_IRQ(esp)))
#define ESP_RESELECTION_IRQ(esp) ((esp)->intreg & (ESP_INTR_RSEL))
/* ESP sequence step register read-only.
 * After a selection command, the low three bits report how far the chip got.
 */
#define ESP_STEP_VBITS 0x07 /* Valid bits */
#define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */
#define ESP_STEP_SID 0x01 /* One msg byte sent */
#define ESP_STEP_NCMD 0x02 /* Was not in command phase */
#define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd
 * bytes to be lost
 */
#define ESP_STEP_FINI4 0x04 /* Command was sent ok */
/* Ho hum, some ESP's set the step register to this as well... */
#define ESP_STEP_FINI5 0x05
#define ESP_STEP_FINI6 0x06
#define ESP_STEP_FINI7 0x07
#define ESP_STEP_SOM 0x08 /* Synchronous Offset Max */
/* ESP chip-test register read-write */
#define ESP_TEST_TARG 0x01 /* Target test mode */
#define ESP_TEST_INI 0x02 /* Initiator test mode */
#define ESP_TEST_TS 0x04 /* Tristate test mode */
/* ESP unique ID register read-only, found on fas236+fas100a+fsc only */
#define ESP_UID_F100A 0x00 /* FAS100A */
#define ESP_UID_F236 0x02 /* FAS236 */
#define ESP_UID_FSC 0xa2 /* NCR53CF9x-2 */
#define ESP_UID_REV 0x07 /* ESP revision */
#define ESP_UID_FAM 0xf8 /* ESP family */
/* ESP fifo flags register read-only */
/* Note that the following implies a 16 byte FIFO on the ESP. */
#define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */
#define ESP_FF_ONOTZERO 0x20 /* offset ctr not zero (esp100,fsc) */
#define ESP_FF_SSTEP 0xe0 /* Sequence step */
/* ESP clock conversion factor register write-only.
 * Value is selected from the input clock frequency; see esp->cfact.
 */
#define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */
#define ESP_CCF_NEVER 0x01 /* Set it to this and die */
#define ESP_CCF_F2 0x02 /* 10MHz */
#define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */
#define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */
#define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */
#define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */
#define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */
/* Selection timeout programmed into the chip's timeout register. */
#define ESP_BUS_TIMEOUT 275 /* In milli-seconds */
/* Timeout-register scale factors (clock ticks per timeout count);
 * the FSC (SYM53C94-2) part uses a different divisor than the rest.
 */
#define ESP_TIMEO_CONST 8192
#define FSC_TIMEO_CONST 7668
/* Default selection-timeout register value for a given input clock (Hz)
 * and clock conversion factor.  Use the named divisor constants above
 * instead of repeating the magic numbers 8192/7668 inline.
 */
#define ESP_NEG_DEFP(mhz, cfact) \
((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (ESP_TIMEO_CONST * (cfact)))
#define FSC_NEG_DEFP(mhz, cfact) \
((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (FSC_TIMEO_CONST * (cfact)))
/* Convert a clock frequency in Hz to a cycle time (driver-internal units). */
#define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000))
/* One ESP tick for a given clock conversion factor and cycle time. */
#define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000))
/* Global probe bookkeeping shared by all board-specific front-ends.
 * (Original comment: "UGLY, UGLY, UGLY!" — global mutable state.)
 */
extern int nesps, esps_in_use, esps_running;
/* For our interrupt engine: iterate over every probed ESP, starting from
 * the global 'espchain' list head (declared elsewhere).
 */
#define for_each_esp(esp) \
for((esp) = espchain; (esp); (esp) = (esp)->next)
/* External functions — the core driver entry points called by the
 * board-specific front-ends and by the SCSI mid-layer.
 */
extern void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs);
extern struct NCR_ESP *esp_allocate(struct scsi_host_template *, void *, int);
extern void esp_deallocate(struct NCR_ESP *);
extern void esp_release(void);
extern void esp_initialize(struct NCR_ESP *);
extern irqreturn_t esp_intr(int, void *);
extern const char *esp_info(struct Scsi_Host *);
extern int esp_queue(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
extern int esp_abort(Scsi_Cmnd *);
extern int esp_reset(Scsi_Cmnd *);
extern int esp_proc_info(struct Scsi_Host *shost, char *buffer, char **start, off_t offset, int length,
int inout);
extern int esp_slave_alloc(struct scsi_device *);
extern void esp_slave_destroy(struct scsi_device *);
#endif /* !(NCR53C9X_H) */

View file

@ -859,44 +859,31 @@ static int setinqserial(struct aac_dev *dev, void *data, int cid)
le32_to_cpu(dev->adapter_info.serial[0]), cid);
}
static void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
u8 a_sense_code, u8 incorrect_length,
u8 bit_pointer, u16 field_pointer,
u32 residue)
static inline void set_sense(struct sense_data *sense_data, u8 sense_key,
u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer)
{
sense_buf[0] = 0xF0; /* Sense data valid, err code 70h (current error) */
u8 *sense_buf = (u8 *)sense_data;
/* Sense data valid, err code 70h */
sense_buf[0] = 0x70; /* No info field */
sense_buf[1] = 0; /* Segment number, always zero */
if (incorrect_length) {
sense_buf[2] = sense_key | 0x20;/* Set ILI bit | sense key */
sense_buf[3] = BYTE3(residue);
sense_buf[4] = BYTE2(residue);
sense_buf[5] = BYTE1(residue);
sense_buf[6] = BYTE0(residue);
} else
sense_buf[2] = sense_key; /* Sense key */
if (sense_key == ILLEGAL_REQUEST)
sense_buf[7] = 10; /* Additional sense length */
else
sense_buf[7] = 6; /* Additional sense length */
sense_buf[2] = sense_key; /* Sense key */
sense_buf[12] = sense_code; /* Additional sense code */
sense_buf[13] = a_sense_code; /* Additional sense code qualifier */
if (sense_key == ILLEGAL_REQUEST) {
sense_buf[15] = 0;
sense_buf[7] = 10; /* Additional sense length */
if (sense_code == SENCODE_INVALID_PARAM_FIELD)
sense_buf[15] = 0x80;/* Std sense key specific field */
sense_buf[15] = bit_pointer;
/* Illegal parameter is in the parameter block */
if (sense_code == SENCODE_INVALID_CDB_FIELD)
sense_buf[15] = 0xc0;/* Std sense key specific field */
sense_buf[15] |= 0xc0;/* Std sense key specific field */
/* Illegal parameter is in the CDB block */
sense_buf[15] |= bit_pointer;
sense_buf[16] = field_pointer >> 8; /* MSB */
sense_buf[17] = field_pointer; /* LSB */
}
} else
sense_buf[7] = 6; /* Additional sense length */
}
static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
@ -906,11 +893,9 @@ static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR,
SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
0, 0);
set_sense(&dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
@ -1520,11 +1505,9 @@ static void io_callback(void *context, struct fib * fibptr)
le32_to_cpu(readreply->status));
#endif
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR,
SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
0, 0);
set_sense(&dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
@ -1733,11 +1716,9 @@ static void synchronize_callback(void *context, struct fib *fibptr)
le32_to_cpu(synchronizereply->status));
cmd->result = DID_OK << 16 |
COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
set_sense((u8 *)&dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR,
SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
0, 0);
set_sense(&dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
@ -1945,10 +1926,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
{
dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST,
SENCODE_INVALID_COMMAND,
ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
set_sense(&dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
ASENCODE_INVALID_COMMAND, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
@ -1995,10 +1975,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
scsicmd->result = DID_OK << 16 |
COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST,
SENCODE_INVALID_CDB_FIELD,
ASENCODE_NO_SENSE, 0, 7, 2, 0);
set_sense(&dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
ASENCODE_NO_SENSE, 7, 2);
memcpy(scsicmd->sense_buffer,
&dev->fsa_dev[cid].sense_data,
min_t(size_t,
@ -2254,9 +2233,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
*/
dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
set_sense(&dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
ASENCODE_INVALID_COMMAND, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t,
sizeof(dev->fsa_dev[cid].sense_data),

View file

@ -243,6 +243,7 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
* Search the list of AdapterFibContext addresses on the adapter
* to be sure this is a valid address
*/
spin_lock_irqsave(&dev->fib_lock, flags);
entry = dev->fib_list.next;
fibctx = NULL;
@ -251,24 +252,25 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
/*
* Extract the AdapterFibContext from the Input parameters.
*/
if (fibctx->unique == f.fibctx) { /* We found a winner */
if (fibctx->unique == f.fibctx) { /* We found a winner */
break;
}
entry = entry->next;
fibctx = NULL;
}
if (!fibctx) {
spin_unlock_irqrestore(&dev->fib_lock, flags);
dprintk ((KERN_INFO "Fib Context not found\n"));
return -EINVAL;
}
if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
(fibctx->size != sizeof(struct aac_fib_context))) {
spin_unlock_irqrestore(&dev->fib_lock, flags);
dprintk ((KERN_INFO "Fib Context corrupt?\n"));
return -EINVAL;
}
status = 0;
spin_lock_irqsave(&dev->fib_lock, flags);
/*
* If there are no fibs to send back, then either wait or return
* -EAGAIN
@ -414,8 +416,8 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
* @arg: ioctl arguments
*
* This routine returns the driver version.
* Under Linux, there have been no version incompatibilities, so this is
* simple!
* Under Linux, there have been no version incompatibilities, so this is
* simple!
*/
static int check_revision(struct aac_dev *dev, void __user *arg)
@ -463,7 +465,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
u32 data_dir;
void __user *sg_user[32];
void *sg_list[32];
u32 sg_indx = 0;
u32 sg_indx = 0;
u32 byte_count = 0;
u32 actual_fibsize64, actual_fibsize = 0;
int i;
@ -517,11 +519,11 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
// Fix up srb for endian and force some values
srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
srbcmd->id = cpu_to_le32(user_srbcmd->id);
srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
srbcmd->flags = cpu_to_le32(flags);
srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
srbcmd->flags = cpu_to_le32(flags);
srbcmd->retry_limit = 0; // Obsolete parameter
srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
@ -786,9 +788,9 @@ static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
pci_info.bus = dev->pdev->bus->number;
pci_info.slot = PCI_SLOT(dev->pdev->devfn);
if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
return -EFAULT;
if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
return -EFAULT;
}
return 0;
}

View file

@ -1130,31 +1130,29 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
if (error < 0)
goto out_deinit;
if (!(aac->adapter_info.options & AAC_OPT_NEW_COMM)) {
error = pci_set_dma_max_seg_size(pdev, 65536);
if (error)
goto out_deinit;
}
/*
* Lets override negotiations and drop the maximum SG limit to 34
*/
if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
(aac->scsi_host_ptr->sg_tablesize > 34)) {
aac->scsi_host_ptr->sg_tablesize = 34;
aac->scsi_host_ptr->max_sectors
= (aac->scsi_host_ptr->sg_tablesize * 8) + 112;
(shost->sg_tablesize > 34)) {
shost->sg_tablesize = 34;
shost->max_sectors = (shost->sg_tablesize * 8) + 112;
}
if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
(aac->scsi_host_ptr->sg_tablesize > 17)) {
aac->scsi_host_ptr->sg_tablesize = 17;
aac->scsi_host_ptr->max_sectors
= (aac->scsi_host_ptr->sg_tablesize * 8) + 112;
(shost->sg_tablesize > 17)) {
shost->sg_tablesize = 17;
shost->max_sectors = (shost->sg_tablesize * 8) + 112;
}
error = pci_set_dma_max_seg_size(pdev,
(aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
(shost->max_sectors << 9) : 65536);
if (error)
goto out_deinit;
/*
* Firware printf works only with older firmware.
* Firmware printf works only with older firmware.
*/
if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
aac->printf_enabled = 1;

View file

@ -12261,7 +12261,7 @@ static ushort __devinit AdvReadEEPWord(AdvPortAddr iop_base, int eep_word_addr)
/*
* Write the EEPROM from 'cfg_buf'.
*/
void __devinit
static void __devinit
AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
{
ushort *wbuf;
@ -12328,7 +12328,7 @@ AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
/*
* Write the EEPROM from 'cfg_buf'.
*/
void __devinit
static void __devinit
AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
{
ushort *wbuf;
@ -12395,7 +12395,7 @@ AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
/*
* Write the EEPROM from 'cfg_buf'.
*/
void __devinit
static void __devinit
AdvSet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf)
{
ushort *wbuf;

View file

@ -48,7 +48,7 @@ struct class_device_attribute;
/*The limit of outstanding scsi command that firmware can handle*/
#define ARCMSR_MAX_OUTSTANDING_CMD 256
#define ARCMSR_MAX_FREECCB_NUM 320
#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2007/08/30"
#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2007/12/24"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096
@ -248,6 +248,7 @@ struct FIRMWARE_INFO
#define ARCMSR_MESSAGE_START_BGRB 0x00060008
#define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008
#define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008
#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
/* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */
#define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000
/* ioctl transfer */
@ -256,6 +257,7 @@ struct FIRMWARE_INFO
#define ARCMSR_DRV2IOP_DATA_READ_OK 0x00000002
#define ARCMSR_DRV2IOP_CDB_POSTED 0x00000004
#define ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED 0x00000008
#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010
/* data tunnel buffer between user space program and its firmware */
/* user space data to iop 128bytes */

View file

@ -315,9 +315,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
(0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
}
reg = (struct MessageUnit_B *)(dma_coherent +
ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
dma_addr = dma_coherent_handle;
ccb_tmp = (struct CommandControlBlock *)dma_coherent;
for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
@ -371,8 +368,8 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
out:
dma_free_coherent(&acb->pdev->dev,
ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20,
acb->dma_coherent, acb->dma_coherent_handle);
(ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
return -ENOMEM;
}
@ -509,6 +506,7 @@ static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
& ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN
, reg->iop2drv_doorbell_reg);
writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
return 0x00;
}
msleep(10);
@ -748,6 +746,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, uint32_t fla
, ccb->startdone
, atomic_read(&acb->ccboutstandingcount));
}
else
arcmsr_report_ccb_state(acb, ccb, flag_ccb);
}
@ -886,7 +885,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \
}
}
static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
@ -906,6 +905,8 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
nseg = scsi_dma_map(pcmd);
if (nseg > ARCMSR_MAX_SG_ENTRIES)
return FAILED;
BUG_ON(nseg < 0);
if (nseg) {
@ -946,6 +947,7 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
ccb->ccb_flags |= CCB_FLAG_WRITE;
}
return SUCCESS;
}
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
@ -1036,18 +1038,22 @@ static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
iounmap(acb->pmuA);
dma_free_coherent(&acb->pdev->dev,
ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
acb->dma_coherent,
acb->dma_coherent_handle);
break;
}
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL);
iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER);
dma_free_coherent(&acb->pdev->dev,
(ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
}
}
dma_free_coherent(&acb->pdev->dev,
ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
acb->dma_coherent,
acb->dma_coherent_handle);
}
void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
@ -1273,7 +1279,9 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
return 1;
writel(~outbound_doorbell, reg->iop2drv_doorbell_reg);
/*in case the last action of doorbell interrupt clearance is cached, this action can push HW to write down the clear bit*/
readl(reg->iop2drv_doorbell_reg);
writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
arcmsr_iop2drv_data_wrote_handle(acb);
}
@ -1380,12 +1388,13 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
case ARCMSR_MESSAGE_READ_RQBUFFER: {
unsigned long *ver_addr;
dma_addr_t buf_handle;
uint8_t *pQbuffer, *ptmpQbuffer;
int32_t allxfer_len = 0;
void *tmp;
ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
if (!ver_addr) {
tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA);
ver_addr = (unsigned long *)tmp;
if (!tmp) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
@ -1421,18 +1430,19 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
memcpy(pcmdmessagefld->messagedatabuffer, (uint8_t *)ver_addr, allxfer_len);
pcmdmessagefld->cmdmessage.Length = allxfer_len;
pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
kfree(tmp);
}
break;
case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
unsigned long *ver_addr;
dma_addr_t buf_handle;
int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
uint8_t *pQbuffer, *ptmpuserbuffer;
void *tmp;
ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
if (!ver_addr) {
tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA);
ver_addr = (unsigned long *)tmp;
if (!tmp) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
@ -1482,7 +1492,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
retvalue = ARCMSR_MESSAGE_FAIL;
}
}
pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
kfree(tmp);
}
break;
@ -1682,8 +1692,11 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
ccb = arcmsr_get_freeccb(acb);
if (!ccb)
return SCSI_MLQUEUE_HOST_BUSY;
arcmsr_build_ccb(acb, ccb, cmd);
if ( arcmsr_build_ccb( acb, ccb, cmd ) == FAILED ) {
cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
cmd->scsi_done(cmd);
return 0;
}
arcmsr_post_ccb(acb, ccb);
return 0;
}
@ -1844,7 +1857,7 @@ static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
}
}
static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \
static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
struct CommandControlBlock *poll_ccb)
{
struct MessageUnit_B *reg = acb->pmuB;
@ -1878,7 +1891,7 @@ static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \
(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
poll_ccb_done = (ccb == poll_ccb) ? 1:0;
if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
if (ccb->startdone == ARCMSR_CCB_ABORTED) {
if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
printk(KERN_NOTICE "arcmsr%d: \
scsi id = %d lun = %d ccb = '0x%p' poll command abort successfully \n"
,acb->host->host_no
@ -1901,7 +1914,7 @@ static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \
} /*drain reply FIFO*/
}
static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, \
static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
struct CommandControlBlock *poll_ccb)
{
switch (acb->adapter_type) {
@ -2026,6 +2039,7 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
do {
firmware_state = readl(reg->iop2drv_doorbell_reg);
} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
}
break;
}
@ -2090,19 +2104,39 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
}
}
static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
return;
case ACB_ADAPTER_TYPE_B:
{
struct MessageUnit_B *reg = acb->pmuB;
writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell_reg);
if(arcmsr_hbb_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
return;
}
}
break;
}
return;
}
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
uint32_t intmask_org;
arcmsr_wait_firmware_ready(acb);
arcmsr_iop_confirm(acb);
/* disable all outbound interrupt */
intmask_org = arcmsr_disable_outbound_ints(acb);
arcmsr_wait_firmware_ready(acb);
arcmsr_iop_confirm(acb);
arcmsr_get_firmware_spec(acb);
/*start background rebuild*/
arcmsr_start_adapter_bgrb(acb);
/* empty doorbell Qbuffer if door bell ringed */
arcmsr_clear_doorbell_queue_buffer(acb);
arcmsr_enable_eoi_mode(acb);
/* enable outbound Post Queue,outbound doorbell Interrupt */
arcmsr_enable_outbound_ints(acb, intmask_org);
acb->acb_flags |= ACB_F_IOP_INITED;
@ -2275,6 +2309,7 @@ static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev)
arcmsr_start_adapter_bgrb(acb);
/* empty doorbell Qbuffer if door bell ringed */
arcmsr_clear_doorbell_queue_buffer(acb);
arcmsr_enable_eoi_mode(acb);
/* enable outbound Post Queue,outbound doorbell Interrupt */
arcmsr_enable_outbound_ints(acb, intmask_org);
acb->acb_flags |= ACB_F_IOP_INITED;

View file

@ -1790,7 +1790,7 @@ int acornscsi_starttransfer(AS_Host *host)
return 0;
}
residual = host->SCpnt->request_bufflen - host->scsi.SCp.scsi_xferred;
residual = scsi_bufflen(host->SCpnt) - host->scsi.SCp.scsi_xferred;
sbic_arm_write(host->scsi.io_port, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer);
sbic_arm_writenext(host->scsi.io_port, residual >> 16);
@ -2270,7 +2270,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x4b: /* -> PHASE_STATUSIN */
case 0x8b: /* -> PHASE_STATUSIN */
/* DATA IN -> STATUS */
host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen -
host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
acornscsi_sbic_xfcount(host);
acornscsi_dma_stop(host);
acornscsi_readstatusbyte(host);
@ -2281,7 +2281,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x4e: /* -> PHASE_MSGOUT */
case 0x8e: /* -> PHASE_MSGOUT */
/* DATA IN -> MESSAGE OUT */
host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen -
host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
acornscsi_sbic_xfcount(host);
acornscsi_dma_stop(host);
acornscsi_sendmessage(host);
@ -2291,7 +2291,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x4f: /* message in */
case 0x8f: /* message in */
/* DATA IN -> MESSAGE IN */
host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen -
host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
acornscsi_sbic_xfcount(host);
acornscsi_dma_stop(host);
acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */
@ -2319,7 +2319,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x4b: /* -> PHASE_STATUSIN */
case 0x8b: /* -> PHASE_STATUSIN */
/* DATA OUT -> STATUS */
host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen -
host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
acornscsi_sbic_xfcount(host);
acornscsi_dma_stop(host);
acornscsi_dma_adjust(host);
@ -2331,7 +2331,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x4e: /* -> PHASE_MSGOUT */
case 0x8e: /* -> PHASE_MSGOUT */
/* DATA OUT -> MESSAGE OUT */
host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen -
host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
acornscsi_sbic_xfcount(host);
acornscsi_dma_stop(host);
acornscsi_dma_adjust(host);
@ -2342,7 +2342,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x4f: /* message in */
case 0x8f: /* message in */
/* DATA OUT -> MESSAGE IN */
host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen -
host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
acornscsi_sbic_xfcount(host);
acornscsi_dma_stop(host);
acornscsi_dma_adjust(host);

View file

@ -18,17 +18,32 @@
* The scatter-gather list handling. This contains all
* the yucky stuff that needs to be fixed properly.
*/
/*
* copy_SCp_to_sg() Assumes contiguous allocation at @sg of at-most @max
* entries of uninitialized memory. SCp is from scsi-ml and has a valid
* (possibly chained) sg-list
*/
static inline int copy_SCp_to_sg(struct scatterlist *sg, struct scsi_pointer *SCp, int max)
{
int bufs = SCp->buffers_residual;
/* FIXME: It should be easy for drivers to loop on copy_SCp_to_sg().
* and to remove this BUG_ON. Use min() in-its-place
*/
BUG_ON(bufs + 1 > max);
sg_set_buf(sg, SCp->ptr, SCp->this_residual);
if (bufs)
memcpy(sg + 1, SCp->buffer + 1,
sizeof(struct scatterlist) * bufs);
if (bufs) {
struct scatterlist *src_sg;
unsigned i;
for_each_sg(sg_next(SCp->buffer), src_sg, bufs, i)
*(++sg) = *src_sg;
sg_mark_end(sg);
}
return bufs + 1;
}
@ -36,7 +51,7 @@ static inline int next_SCp(struct scsi_pointer *SCp)
{
int ret = SCp->buffers_residual;
if (ret) {
SCp->buffer++;
SCp->buffer = sg_next(SCp->buffer);
SCp->buffers_residual--;
SCp->ptr = sg_virt(SCp->buffer);
SCp->this_residual = SCp->buffer->length;
@ -68,46 +83,46 @@ static inline void init_SCp(struct scsi_cmnd *SCpnt)
{
memset(&SCpnt->SCp, 0, sizeof(struct scsi_pointer));
if (SCpnt->use_sg) {
if (scsi_bufflen(SCpnt)) {
unsigned long len = 0;
int buf;
SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->request_buffer;
SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1;
SCpnt->SCp.buffer = scsi_sglist(SCpnt);
SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
SCpnt->SCp.ptr = sg_virt(SCpnt->SCp.buffer);
SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
SCpnt->SCp.phase = SCpnt->request_bufflen;
SCpnt->SCp.phase = scsi_bufflen(SCpnt);
#ifdef BELT_AND_BRACES
/*
* Calculate correct buffer length. Some commands
* come in with the wrong request_bufflen.
*/
for (buf = 0; buf <= SCpnt->SCp.buffers_residual; buf++)
len += SCpnt->SCp.buffer[buf].length;
{ /*
* Calculate correct buffer length. Some commands
* come in with the wrong scsi_bufflen.
*/
struct scatterlist *sg;
unsigned i, sg_count = scsi_sg_count(SCpnt);
if (SCpnt->request_bufflen != len)
printk(KERN_WARNING "scsi%d.%c: bad request buffer "
"length %d, should be %ld\n", SCpnt->device->host->host_no,
'0' + SCpnt->device->id, SCpnt->request_bufflen, len);
SCpnt->request_bufflen = len;
scsi_for_each_sg(SCpnt, sg, sg_count, i)
len += sg->length;
if (scsi_bufflen(SCpnt) != len) {
printk(KERN_WARNING
"scsi%d.%c: bad request buffer "
"length %d, should be %ld\n",
SCpnt->device->host->host_no,
'0' + SCpnt->device->id,
scsi_bufflen(SCpnt), len);
/*
* FIXME: Totaly naive fixup. We should abort
* with error
*/
SCpnt->SCp.phase =
min_t(unsigned long, len,
scsi_bufflen(SCpnt));
}
}
#endif
} else {
SCpnt->SCp.ptr = (unsigned char *)SCpnt->request_buffer;
SCpnt->SCp.this_residual = SCpnt->request_bufflen;
SCpnt->SCp.phase = SCpnt->request_bufflen;
}
/*
* If the upper SCSI layers pass a buffer, but zero length,
* we aren't interested in the buffer pointer.
*/
if (SCpnt->SCp.this_residual == 0 && SCpnt->SCp.ptr) {
#if 0 //def BELT_AND_BRACES
printk(KERN_WARNING "scsi%d.%c: zero length buffer passed for "
"command ", SCpnt->host->host_no, '0' + SCpnt->target);
__scsi_print_command(SCpnt->cmnd);
#endif
SCpnt->SCp.ptr = NULL;
SCpnt->SCp.this_residual = 0;
SCpnt->SCp.phase = 0;
}
}

View file

@ -1,353 +0,0 @@
/* blz1230.c: Driver for Blizzard 1230 SCSI IV Controller.
*
* Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
*
* This driver is based on the CyberStorm driver, hence the occasional
* reference to CyberStorm.
*/
/* TODO:
*
* 1) Figure out how to make a cleaner merge with the sparc driver with regard
* to the caches and the Sparc MMU mapping.
* 2) Make as few routines required outside the generic driver. A lot of the
* routines in this file used to be inline!
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/interrupt.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "NCR53C9x.h"
#include <linux/zorro.h>
#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <asm/pgtable.h>
#define MKIV 1
/* The controller registers can be found in the Z2 config area at these
* offsets:
*/
#define BLZ1230_ESP_ADDR 0x8000
#define BLZ1230_DMA_ADDR 0x10000
#define BLZ1230II_ESP_ADDR 0x10000
#define BLZ1230II_DMA_ADDR 0x10021
/* The Blizzard 1230 DMA interface
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Only two things can be programmed in the Blizzard DMA:
* 1) The data direction is controlled by the status of bit 31 (1 = write)
* 2) The source/dest address (word aligned, shifted one right) in bits 30-0
*
* Program DMA by first latching the highest byte of the address/direction
* (i.e. bits 31-24 of the long word constructed as described in steps 1+2
* above). Then write each byte of the address/direction (starting with the
* top byte, working down) to the DMA address register.
*
* Figure out interrupt status by reading the ESP status byte.
*/
struct blz1230_dma_registers {
volatile unsigned char dma_addr; /* DMA address [0x0000] */
unsigned char dmapad2[0x7fff];
volatile unsigned char dma_latch; /* DMA latch [0x8000] */
};
struct blz1230II_dma_registers {
volatile unsigned char dma_addr; /* DMA address [0x0000] */
unsigned char dmapad2[0xf];
volatile unsigned char dma_latch; /* DMA latch [0x0010] */
};
#define BLZ1230_DMA_WRITE 0x80000000
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_ints_off(struct NCR_ESP *esp);
static void dma_ints_on(struct NCR_ESP *esp);
static int dma_irq_p(struct NCR_ESP *esp);
static int dma_ports_p(struct NCR_ESP *esp);
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
static volatile unsigned char cmd_buffer[16];
/* This is where all commands are put
* before they are transferred to the ESP chip
* via PIO.
*/
/***************************************************************** Detection */
int __init blz1230_esp_detect(struct scsi_host_template *tpnt)
{
struct NCR_ESP *esp;
struct zorro_dev *z = NULL;
unsigned long address;
struct ESP_regs *eregs;
unsigned long board;
#if MKIV
#define REAL_BLZ1230_ID ZORRO_PROD_PHASE5_BLIZZARD_1230_IV_1260
#define REAL_BLZ1230_ESP_ADDR BLZ1230_ESP_ADDR
#define REAL_BLZ1230_DMA_ADDR BLZ1230_DMA_ADDR
#else
#define REAL_BLZ1230_ID ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060
#define REAL_BLZ1230_ESP_ADDR BLZ1230II_ESP_ADDR
#define REAL_BLZ1230_DMA_ADDR BLZ1230II_DMA_ADDR
#endif
if ((z = zorro_find_device(REAL_BLZ1230_ID, z))) {
board = z->resource.start;
if (request_mem_region(board+REAL_BLZ1230_ESP_ADDR,
sizeof(struct ESP_regs), "NCR53C9x")) {
/* Do some magic to figure out if the blizzard is
* equipped with a SCSI controller
*/
address = ZTWO_VADDR(board);
eregs = (struct ESP_regs *)(address + REAL_BLZ1230_ESP_ADDR);
esp = esp_allocate(tpnt, (void *)board + REAL_BLZ1230_ESP_ADDR,
0);
esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7));
udelay(5);
if(esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7))
goto err_out;
/* Do command transfer with programmed I/O */
esp->do_pio_cmds = 1;
/* Required functions */
esp->dma_bytes_sent = &dma_bytes_sent;
esp->dma_can_transfer = &dma_can_transfer;
esp->dma_dump_state = &dma_dump_state;
esp->dma_init_read = &dma_init_read;
esp->dma_init_write = &dma_init_write;
esp->dma_ints_off = &dma_ints_off;
esp->dma_ints_on = &dma_ints_on;
esp->dma_irq_p = &dma_irq_p;
esp->dma_ports_p = &dma_ports_p;
esp->dma_setup = &dma_setup;
/* Optional functions */
esp->dma_barrier = 0;
esp->dma_drain = 0;
esp->dma_invalidate = 0;
esp->dma_irq_entry = 0;
esp->dma_irq_exit = 0;
esp->dma_led_on = 0;
esp->dma_led_off = 0;
esp->dma_poll = 0;
esp->dma_reset = 0;
/* SCSI chip speed */
esp->cfreq = 40000000;
/* The DMA registers on the Blizzard are mapped
* relative to the device (i.e. in the same Zorro
* I/O block).
*/
esp->dregs = (void *)(address + REAL_BLZ1230_DMA_ADDR);
/* ESP register base */
esp->eregs = eregs;
/* Set the command buffer */
esp->esp_command = cmd_buffer;
esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
esp->irq = IRQ_AMIGA_PORTS;
esp->slot = board+REAL_BLZ1230_ESP_ADDR;
if (request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
"Blizzard 1230 SCSI IV", esp->ehost))
goto err_out;
/* Figure out our scsi ID on the bus */
esp->scsi_id = 7;
/* We don't have a differential SCSI-bus. */
esp->diff = 0;
esp_initialize(esp);
printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
esps_running = esps_in_use;
return esps_in_use;
}
}
return 0;
err_out:
scsi_unregister(esp->ehost);
esp_deallocate(esp);
release_mem_region(board+REAL_BLZ1230_ESP_ADDR,
sizeof(struct ESP_regs));
return 0;
}
/************************************************************* DMA Functions */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
{
/* Since the Blizzard DMA is fully dedicated to the ESP chip,
* the number of bytes sent (to the ESP chip) equals the number
* of bytes in the FIFO - there is no buffering in the DMA controller.
* XXXX Do I read this right? It is from host to ESP, right?
*/
return fifo_count;
}
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
/* I don't think there's any limit on the Blizzard DMA. So we use what
* the ESP chip can handle (24 bit).
*/
unsigned long sz = sp->SCp.this_residual;
if(sz > 0x1000000)
sz = 0x1000000;
return sz;
}
static void dma_dump_state(struct NCR_ESP *esp)
{
ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
amiga_custom.intreqr, amiga_custom.intenar));
}
void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
{
#if MKIV
struct blz1230_dma_registers *dregs =
(struct blz1230_dma_registers *) (esp->dregs);
#else
struct blz1230II_dma_registers *dregs =
(struct blz1230II_dma_registers *) (esp->dregs);
#endif
cache_clear(addr, length);
addr >>= 1;
addr &= ~(BLZ1230_DMA_WRITE);
/* First set latch */
dregs->dma_latch = (addr >> 24) & 0xff;
/* Then pump the address to the DMA address register */
#if MKIV
dregs->dma_addr = (addr >> 24) & 0xff;
#endif
dregs->dma_addr = (addr >> 16) & 0xff;
dregs->dma_addr = (addr >> 8) & 0xff;
dregs->dma_addr = (addr ) & 0xff;
}
void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
{
#if MKIV
struct blz1230_dma_registers *dregs =
(struct blz1230_dma_registers *) (esp->dregs);
#else
struct blz1230II_dma_registers *dregs =
(struct blz1230II_dma_registers *) (esp->dregs);
#endif
cache_push(addr, length);
addr >>= 1;
addr |= BLZ1230_DMA_WRITE;
/* First set latch */
dregs->dma_latch = (addr >> 24) & 0xff;
/* Then pump the address to the DMA address register */
#if MKIV
dregs->dma_addr = (addr >> 24) & 0xff;
#endif
dregs->dma_addr = (addr >> 16) & 0xff;
dregs->dma_addr = (addr >> 8) & 0xff;
dregs->dma_addr = (addr ) & 0xff;
}
static void dma_ints_off(struct NCR_ESP *esp)
{
disable_irq(esp->irq);
}
static void dma_ints_on(struct NCR_ESP *esp)
{
enable_irq(esp->irq);
}
static int dma_irq_p(struct NCR_ESP *esp)
{
return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR);
}
static int dma_ports_p(struct NCR_ESP *esp)
{
return ((amiga_custom.intenar) & IF_PORTS);
}
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
{
/* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
* so when (write) is true, it actually means READ!
*/
if(write){
dma_init_read(esp, addr, count);
} else {
dma_init_write(esp, addr, count);
}
}
#define HOSTS_C
int blz1230_esp_release(struct Scsi_Host *instance)
{
#ifdef MODULE
unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
esp_deallocate((struct NCR_ESP *)instance->hostdata);
esp_release();
release_mem_region(address, sizeof(struct ESP_regs));
free_irq(IRQ_AMIGA_PORTS, esp_intr);
#endif
return 1;
}
static struct scsi_host_template driver_template = {
.proc_name = "esp-blz1230",
.proc_info = esp_proc_info,
.name = "Blizzard1230 SCSI IV",
.detect = blz1230_esp_detect,
.slave_alloc = esp_slave_alloc,
.slave_destroy = esp_slave_destroy,
.release = blz1230_esp_release,
.queuecommand = esp_queue,
.eh_abort_handler = esp_abort,
.eh_bus_reset_handler = esp_reset,
.can_queue = 7,
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING
};
#include "scsi_module.c"
MODULE_LICENSE("GPL");

View file

@ -1,306 +0,0 @@
/* blz2060.c: Driver for Blizzard 2060 SCSI Controller.
*
* Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
*
* This driver is based on the CyberStorm driver, hence the occasional
* reference to CyberStorm.
*/
/* TODO:
*
* 1) Figure out how to make a cleaner merge with the sparc driver with regard
* to the caches and the Sparc MMU mapping.
* 2) Make as few routines required outside the generic driver. A lot of the
* routines in this file used to be inline!
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/interrupt.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "NCR53C9x.h"
#include <linux/zorro.h>
#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <asm/pgtable.h>
/* The controller registers can be found in the Z2 config area at these
* offsets:
*/
#define BLZ2060_ESP_ADDR 0x1ff00
#define BLZ2060_DMA_ADDR 0x1ffe0
/* The Blizzard 2060 DMA interface
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Only two things can be programmed in the Blizzard DMA:
* 1) The data direction is controlled by the status of bit 31 (1 = write)
* 2) The source/dest address (word aligned, shifted one right) in bits 30-0
*
* Figure out interrupt status by reading the ESP status byte.
*/
struct blz2060_dma_registers {
volatile unsigned char dma_led_ctrl; /* DMA led control [0x000] */
unsigned char dmapad1[0x0f];
volatile unsigned char dma_addr0; /* DMA address (MSB) [0x010] */
unsigned char dmapad2[0x03];
volatile unsigned char dma_addr1; /* DMA address [0x014] */
unsigned char dmapad3[0x03];
volatile unsigned char dma_addr2; /* DMA address [0x018] */
unsigned char dmapad4[0x03];
volatile unsigned char dma_addr3; /* DMA address (LSB) [0x01c] */
};
#define BLZ2060_DMA_WRITE 0x80000000
/* DMA control bits */
#define BLZ2060_DMA_LED 0x02 /* HD led control 1 = off */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_ints_off(struct NCR_ESP *esp);
static void dma_ints_on(struct NCR_ESP *esp);
static int dma_irq_p(struct NCR_ESP *esp);
static void dma_led_off(struct NCR_ESP *esp);
static void dma_led_on(struct NCR_ESP *esp);
static int dma_ports_p(struct NCR_ESP *esp);
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
static volatile unsigned char cmd_buffer[16];
/* This is where all commands are put
* before they are transferred to the ESP chip
* via PIO.
*/
/***************************************************************** Detection */
int __init blz2060_esp_detect(struct scsi_host_template *tpnt)
{
struct NCR_ESP *esp;
struct zorro_dev *z = NULL;
unsigned long address;
if ((z = zorro_find_device(ZORRO_PROD_PHASE5_BLIZZARD_2060, z))) {
unsigned long board = z->resource.start;
if (request_mem_region(board+BLZ2060_ESP_ADDR,
sizeof(struct ESP_regs), "NCR53C9x")) {
esp = esp_allocate(tpnt, (void *)board + BLZ2060_ESP_ADDR, 0);
/* Do command transfer with programmed I/O */
esp->do_pio_cmds = 1;
/* Required functions */
esp->dma_bytes_sent = &dma_bytes_sent;
esp->dma_can_transfer = &dma_can_transfer;
esp->dma_dump_state = &dma_dump_state;
esp->dma_init_read = &dma_init_read;
esp->dma_init_write = &dma_init_write;
esp->dma_ints_off = &dma_ints_off;
esp->dma_ints_on = &dma_ints_on;
esp->dma_irq_p = &dma_irq_p;
esp->dma_ports_p = &dma_ports_p;
esp->dma_setup = &dma_setup;
/* Optional functions */
esp->dma_barrier = 0;
esp->dma_drain = 0;
esp->dma_invalidate = 0;
esp->dma_irq_entry = 0;
esp->dma_irq_exit = 0;
esp->dma_led_on = &dma_led_on;
esp->dma_led_off = &dma_led_off;
esp->dma_poll = 0;
esp->dma_reset = 0;
/* SCSI chip speed */
esp->cfreq = 40000000;
/* The DMA registers on the Blizzard are mapped
* relative to the device (i.e. in the same Zorro
* I/O block).
*/
address = (unsigned long)ZTWO_VADDR(board);
esp->dregs = (void *)(address + BLZ2060_DMA_ADDR);
/* ESP register base */
esp->eregs = (struct ESP_regs *)(address + BLZ2060_ESP_ADDR);
/* Set the command buffer */
esp->esp_command = cmd_buffer;
esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
esp->irq = IRQ_AMIGA_PORTS;
request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
"Blizzard 2060 SCSI", esp->ehost);
/* Figure out our scsi ID on the bus */
esp->scsi_id = 7;
/* We don't have a differential SCSI-bus. */
esp->diff = 0;
esp_initialize(esp);
printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
esps_running = esps_in_use;
return esps_in_use;
}
}
return 0;
}
/************************************************************* DMA Functions */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
{
/* Since the Blizzard DMA is fully dedicated to the ESP chip,
* the number of bytes sent (to the ESP chip) equals the number
* of bytes in the FIFO - there is no buffering in the DMA controller.
* XXXX Do I read this right? It is from host to ESP, right?
*/
return fifo_count;
}
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
/* I don't think there's any limit on the Blizzard DMA. So we use what
* the ESP chip can handle (24 bit).
*/
unsigned long sz = sp->SCp.this_residual;
if(sz > 0x1000000)
sz = 0x1000000;
return sz;
}
static void dma_dump_state(struct NCR_ESP *esp)
{
ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
amiga_custom.intreqr, amiga_custom.intenar));
}
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
{
struct blz2060_dma_registers *dregs =
(struct blz2060_dma_registers *) (esp->dregs);
cache_clear(addr, length);
addr >>= 1;
addr &= ~(BLZ2060_DMA_WRITE);
dregs->dma_addr3 = (addr ) & 0xff;
dregs->dma_addr2 = (addr >> 8) & 0xff;
dregs->dma_addr1 = (addr >> 16) & 0xff;
dregs->dma_addr0 = (addr >> 24) & 0xff;
}
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
{
struct blz2060_dma_registers *dregs =
(struct blz2060_dma_registers *) (esp->dregs);
cache_push(addr, length);
addr >>= 1;
addr |= BLZ2060_DMA_WRITE;
dregs->dma_addr3 = (addr ) & 0xff;
dregs->dma_addr2 = (addr >> 8) & 0xff;
dregs->dma_addr1 = (addr >> 16) & 0xff;
dregs->dma_addr0 = (addr >> 24) & 0xff;
}
static void dma_ints_off(struct NCR_ESP *esp)
{
disable_irq(esp->irq);
}
static void dma_ints_on(struct NCR_ESP *esp)
{
enable_irq(esp->irq);
}
static int dma_irq_p(struct NCR_ESP *esp)
{
return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR);
}
static void dma_led_off(struct NCR_ESP *esp)
{
((struct blz2060_dma_registers *) (esp->dregs))->dma_led_ctrl =
BLZ2060_DMA_LED;
}
static void dma_led_on(struct NCR_ESP *esp)
{
((struct blz2060_dma_registers *) (esp->dregs))->dma_led_ctrl = 0;
}
static int dma_ports_p(struct NCR_ESP *esp)
{
return ((amiga_custom.intenar) & IF_PORTS);
}
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
{
/* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
* so when (write) is true, it actually means READ!
*/
if(write){
dma_init_read(esp, addr, count);
} else {
dma_init_write(esp, addr, count);
}
}
#define HOSTS_C
int blz2060_esp_release(struct Scsi_Host *instance)
{
#ifdef MODULE
unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
esp_deallocate((struct NCR_ESP *)instance->hostdata);
esp_release();
release_mem_region(address, sizeof(struct ESP_regs));
free_irq(IRQ_AMIGA_PORTS, esp_intr);
#endif
return 1;
}
static struct scsi_host_template driver_template = {
.proc_name = "esp-blz2060",
.proc_info = esp_proc_info,
.name = "Blizzard2060 SCSI",
.detect = blz2060_esp_detect,
.slave_alloc = esp_slave_alloc,
.slave_destroy = esp_slave_destroy,
.release = blz2060_esp_release,
.queuecommand = esp_queue,
.eh_abort_handler = esp_abort,
.eh_bus_reset_handler = esp_reset,
.can_queue = 7,
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING
};
#include "scsi_module.c"
MODULE_LICENSE("GPL");

View file

@ -1,377 +0,0 @@
/* cyberstorm.c: Driver for CyberStorm SCSI Controller.
*
* Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
*
* The CyberStorm SCSI driver is based on David S. Miller's ESP driver
* for the Sparc computers.
*
* This work was made possible by Phase5 who willingly (and most generously)
* supported me with hardware and all the information I needed.
*/
/* TODO:
*
* 1) Figure out how to make a cleaner merge with the sparc driver with regard
* to the caches and the Sparc MMU mapping.
* 2) Make as few routines required outside the generic driver. A lot of the
* routines in this file used to be inline!
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/interrupt.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "NCR53C9x.h"
#include <linux/zorro.h>
#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <asm/pgtable.h>
/* The controller registers can be found in the Z2 config area at these
* offsets:
*/
#define CYBER_ESP_ADDR 0xf400
#define CYBER_DMA_ADDR 0xf800
/* The CyberStorm DMA interface */
struct cyber_dma_registers {
volatile unsigned char dma_addr0; /* DMA address (MSB) [0x000] */
unsigned char dmapad1[1];
volatile unsigned char dma_addr1; /* DMA address [0x002] */
unsigned char dmapad2[1];
volatile unsigned char dma_addr2; /* DMA address [0x004] */
unsigned char dmapad3[1];
volatile unsigned char dma_addr3; /* DMA address (LSB) [0x006] */
unsigned char dmapad4[0x3fb];
volatile unsigned char cond_reg; /* DMA cond (ro) [0x402] */
#define ctrl_reg cond_reg /* DMA control (wo) [0x402] */
};
/* DMA control bits */
#define CYBER_DMA_LED 0x80 /* HD led control 1 = on */
#define CYBER_DMA_WRITE 0x40 /* DMA direction. 1 = write */
#define CYBER_DMA_Z3 0x20 /* 16 (Z2) or 32 (CHIP/Z3) bit DMA transfer */
/* DMA status bits */
#define CYBER_DMA_HNDL_INTR 0x80 /* DMA IRQ pending? */
/* The bits below appears to be Phase5 Debug bits only; they were not
* described by Phase5 so using them may seem a bit stupid...
*/
#define CYBER_HOST_ID 0x02 /* If set, host ID should be 7, otherwise
* it should be 6.
*/
#define CYBER_SLOW_CABLE 0x08 /* If *not* set, assume SLOW_CABLE */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_ints_off(struct NCR_ESP *esp);
static void dma_ints_on(struct NCR_ESP *esp);
static int dma_irq_p(struct NCR_ESP *esp);
static void dma_led_off(struct NCR_ESP *esp);
static void dma_led_on(struct NCR_ESP *esp);
static int dma_ports_p(struct NCR_ESP *esp);
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
/* Soft copy of the write-only DMA control register.  The hardware
 * location reads back as cond_reg, so every write to ctrl_reg must go
 * through this shadow to make read-modify-write sequences possible.
 */
static unsigned char ctrl_data = 0;

/* PIO bounce buffer: ESP commands are staged here before being pushed
 * to the chip via programmed I/O.
 */
static volatile unsigned char cmd_buffer[16];
/***************************************************************** Detection */
/* Probe the Zorro bus for a CyberStorm SCSI host, wire its DMA callbacks
 * into the generic NCR53C9x core and register it.
 *
 * Returns the number of ESP hosts put into use, or 0 if none was found
 * (or the board turned out not to be a CyberStorm).
 */
int __init cyber_esp_detect(struct scsi_host_template *tpnt)
{
	struct NCR_ESP *esp;
	struct zorro_dev *z = NULL;
	unsigned long address;

	while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
		unsigned long board = z->resource.start;
		if ((z->id == ZORRO_PROD_PHASE5_BLIZZARD_1220_CYBERSTORM ||
		     z->id == ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060) &&
		    request_mem_region(board+CYBER_ESP_ADDR,
				       sizeof(struct ESP_regs), "NCR53C9x")) {
			/* Figure out if this is a CyberStorm or really a
			 * Fastlane/Blizzard Mk II by looking at the board size.
			 * CyberStorm maps 64kB
			 * (ZORRO_PROD_PHASE5_BLIZZARD_1220_CYBERSTORM does anyway)
			 */
			if(z->resource.end-board != 0xffff) {
				release_mem_region(board+CYBER_ESP_ADDR,
						   sizeof(struct ESP_regs));
				return 0;
			}
			esp = esp_allocate(tpnt, (void *)board + CYBER_ESP_ADDR, 0);

			/* Do command transfer with programmed I/O */
			esp->do_pio_cmds = 1;

			/* Required functions */
			esp->dma_bytes_sent = &dma_bytes_sent;
			esp->dma_can_transfer = &dma_can_transfer;
			esp->dma_dump_state = &dma_dump_state;
			esp->dma_init_read = &dma_init_read;
			esp->dma_init_write = &dma_init_write;
			esp->dma_ints_off = &dma_ints_off;
			esp->dma_ints_on = &dma_ints_on;
			esp->dma_irq_p = &dma_irq_p;
			esp->dma_ports_p = &dma_ports_p;
			esp->dma_setup = &dma_setup;

			/* Optional functions (unused on this hardware) */
			esp->dma_barrier = 0;
			esp->dma_drain = 0;
			esp->dma_invalidate = 0;
			esp->dma_irq_entry = 0;
			esp->dma_irq_exit = 0;
			esp->dma_led_on = &dma_led_on;
			esp->dma_led_off = &dma_led_off;
			esp->dma_poll = 0;
			esp->dma_reset = 0;

			/* SCSI chip speed */
			esp->cfreq = 40000000;

			/* The DMA registers on the CyberStorm are mapped
			 * relative to the device (i.e. in the same Zorro
			 * I/O block).
			 */
			address = (unsigned long)ZTWO_VADDR(board);
			esp->dregs = (void *)(address + CYBER_DMA_ADDR);

			/* ESP register base */
			esp->eregs = (struct ESP_regs *)(address + CYBER_ESP_ADDR);

			/* Set the command buffer */
			esp->esp_command = cmd_buffer;
			esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);

			esp->irq = IRQ_AMIGA_PORTS;
			/* NOTE(review): request_irq() return value is ignored;
			 * a failure here would leave the host half-registered.
			 */
			request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
				    "CyberStorm SCSI", esp->ehost);

			/* Figure out our scsi ID on the bus */
			/* The DMA cond flag contains a hardcoded jumper bit
			 * which can be used to select host number 6 or 7.
			 * However, even though it may change, we use a hardcoded
			 * value of 7.
			 */
			esp->scsi_id = 7;

			/* We don't have a differential SCSI-bus. */
			esp->diff = 0;

			esp_initialize(esp);

			printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
			esps_running = esps_in_use;
			return esps_in_use;
		}
	}
	return 0;
}
/************************************************************* DMA Functions */
/* Report how many bytes the DMA engine has already handed to the chip. */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
{
	/* Since the CyberStorm DMA is fully dedicated to the ESP chip,
	 * the number of bytes sent (to the ESP chip) equals the number
	 * of bytes in the FIFO - there is no buffering in the DMA controller.
	 * XXXX Do I read this right? It is from host to ESP, right?
	 */
	return fifo_count;
}
/* Largest chunk the core may program for this command.  The CyberStorm
 * DMA engine itself has no limit, so the cap is the ESP chip's 24-bit
 * transfer counter (16 MB).
 */
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
	unsigned long remaining = sp->SCp.this_residual;

	return (remaining > 0x1000000) ? 0x1000000 : remaining;
}
/* Debug helper: log the DMA condition register and the Amiga custom-chip
 * interrupt request/enable registers.
 */
static void dma_dump_state(struct NCR_ESP *esp)
{
	ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
		esp->esp_id, ((struct cyber_dma_registers *)
			      (esp->dregs))->cond_reg));
	ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
		amiga_custom.intreqr, amiga_custom.intenar));
}
/* Program the DMA engine for a device-to-memory transfer.
 * The address bytes and the control register must be written in this
 * exact order; bit 0 of the address selects the direction (cleared for
 * reads).
 */
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
{
	struct cyber_dma_registers *dregs =
		(struct cyber_dma_registers *) esp->dregs;

	/* Invalidate the target range before the device writes into it. */
	cache_clear(addr, length);

	addr &= ~(1);
	dregs->dma_addr0 = (addr >> 24) & 0xff;
	dregs->dma_addr1 = (addr >> 16) & 0xff;
	dregs->dma_addr2 = (addr >>  8) & 0xff;
	dregs->dma_addr3 = (addr      ) & 0xff;
	ctrl_data &= ~(CYBER_DMA_WRITE);

	/* Check if physical address is outside Z2 space and of
	 * block length/block aligned in memory. If this is the
	 * case, enable 32 bit transfer. In all other cases, fall back
	 * to 16 bit transfer.
	 * Obviously 32 bit transfer should be enabled if the DMA address
	 * and length are 32 bit aligned. However, this leads to some
	 * strange behavior. Even 64 bit aligned addr/length fails.
	 * Until I've found a reason for this, 32 bit transfer is only
	 * used for full-block transfers (1kB).
	 *					-jskov
	 */
#if 0
	if((addr & 0x3fc) || length & 0x3ff || ((addr > 0x200000) &&
						(addr < 0xff0000)))
		ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */
	else
		ctrl_data |= CYBER_DMA_Z3;	/* CHIP/Z3, do 32 bit DMA */
#else
	ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */
#endif
	dregs->ctrl_reg = ctrl_data;
}
/* Program the DMA engine for a memory-to-device transfer.  Mirror image
 * of dma_init_read(): bit 0 of the address is set to select the write
 * direction, and dirty cache lines are pushed out first.
 */
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
{
	struct cyber_dma_registers *dregs =
		(struct cyber_dma_registers *) esp->dregs;

	/* Flush the source range so the device sees current data. */
	cache_push(addr, length);

	addr |= 1;
	dregs->dma_addr0 = (addr >> 24) & 0xff;
	dregs->dma_addr1 = (addr >> 16) & 0xff;
	dregs->dma_addr2 = (addr >>  8) & 0xff;
	dregs->dma_addr3 = (addr      ) & 0xff;
	ctrl_data |= CYBER_DMA_WRITE;

	/* See comment above (32-bit transfers disabled; see dma_init_read) */
#if 0
	if((addr & 0x3fc) || length & 0x3ff || ((addr > 0x200000) &&
						(addr < 0xff0000)))
		ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */
	else
		ctrl_data |= CYBER_DMA_Z3;	/* CHIP/Z3, do 32 bit DMA */
#else
	ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */
#endif
	dregs->ctrl_reg = ctrl_data;
}
/* Mask the (shared) interrupt line while the core is in a critical
 * section.
 */
static void dma_ints_off(struct NCR_ESP *esp)
{
	disable_irq(esp->irq);
}

/* Re-enable the interrupt line. */
static void dma_ints_on(struct NCR_ESP *esp)
{
	enable_irq(esp->irq);
}

/* Is the pending interrupt ours?  Require both the ESP status bit and
 * the DMA engine's handler bit.
 */
static int dma_irq_p(struct NCR_ESP *esp)
{
	/* It's important to check the DMA IRQ bit in the correct way! */
	return ((esp_read(esp->eregs->esp_status) & ESP_STAT_INTR) &&
		((((struct cyber_dma_registers *)(esp->dregs))->cond_reg) &
		 CYBER_DMA_HNDL_INTR));
}
static void dma_led_off(struct NCR_ESP *esp)
{
ctrl_data &= ~CYBER_DMA_LED;
((struct cyber_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
}
static void dma_led_on(struct NCR_ESP *esp)
{
ctrl_data |= CYBER_DMA_LED;
((struct cyber_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
}
/* Nonzero iff the Amiga PORTS interrupt (our IRQ source) is enabled. */
static int dma_ports_p(struct NCR_ESP *esp)
{
	unsigned short enabled = amiga_custom.intenar;

	return enabled & IF_PORTS;
}
/* Dispatch a transfer setup.  Note the inherited Sparc convention:
 * DMA_ST_WRITE means "move data from device to memory", so a true
 * 'write' argument actually sets up a read.
 */
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
{
	if (!write)
		dma_init_write(esp, addr, count);
	else
		dma_init_read(esp, addr, count);
}
#define HOSTS_C

/* Tear down a CyberStorm host on module unload.
 *
 * Fix: free_irq() must be passed the same dev_id that request_irq() was
 * given — the detect routine registered the shared handler with
 * esp->ehost (i.e. this Scsi_Host) as dev_id, but the old code passed
 * the handler function instead, so the shared irqaction was never
 * matched and removed.
 *
 * NOTE(review): 'address' comes from esp->edev while detection reserved
 * board+CYBER_ESP_ADDR; this relies on edev holding that same address —
 * confirm against esp_allocate().
 */
int cyber_esp_release(struct Scsi_Host *instance)
{
#ifdef MODULE
	unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;

	esp_deallocate((struct NCR_ESP *)instance->hostdata);
	esp_release();
	release_mem_region(address, sizeof(struct ESP_regs));
	free_irq(IRQ_AMIGA_PORTS, instance);
#endif
	return 1;
}
/* SCSI host template for the CyberStorm adapter; hooks the legacy
 * detect/release entry points into the old scsi_module.c glue included
 * below.
 */
static struct scsi_host_template driver_template = {
	.proc_name		= "esp-cyberstorm",
	.proc_info		= esp_proc_info,
	.name			= "CyberStorm SCSI",
	.detect			= cyber_esp_detect,
	.slave_alloc		= esp_slave_alloc,
	.slave_destroy		= esp_slave_destroy,
	.release		= cyber_esp_release,
	.queuecommand		= esp_queue,
	.eh_abort_handler	= esp_abort,
	.eh_bus_reset_handler	= esp_reset,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING
};
#include "scsi_module.c"
MODULE_LICENSE("GPL");

View file

@ -1,314 +0,0 @@
/* cyberstormII.c: Driver for CyberStorm SCSI Mk II
*
* Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
*
* This driver is based on cyberstorm.c
*/
/* TODO:
*
* 1) Figure out how to make a cleaner merge with the sparc driver with regard
* to the caches and the Sparc MMU mapping.
* 2) Make as few routines required outside the generic driver. A lot of the
* routines in this file used to be inline!
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/interrupt.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "NCR53C9x.h"
#include <linux/zorro.h>
#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <asm/pgtable.h>
/* The controller registers can be found in the Z2 config area at these
* offsets:
*/
#define CYBERII_ESP_ADDR 0x1ff03
#define CYBERII_DMA_ADDR 0x1ff43
/* The CyberStorm II DMA interface */
/* Register map of the CyberStorm Mk II DMA engine at CYBERII_DMA_ADDR.
 * Unlike the Mk I the condition/control register comes first and the
 * address bytes are 4 bytes apart; layout must match the hardware.
 */
struct cyberII_dma_registers {
	volatile unsigned char	cond_reg;	/* DMA cond    (ro)  [0x000] */
#define ctrl_reg  cond_reg			/* DMA control (wo)  [0x000] — same offset, write-only alias */
	unsigned char		dmapad4[0x3f];
	volatile unsigned char	dma_addr0;	/* DMA address (MSB) [0x040] */
	unsigned char		dmapad1[3];
	volatile unsigned char	dma_addr1;	/* DMA address       [0x044] */
	unsigned char		dmapad2[3];
	volatile unsigned char	dma_addr2;	/* DMA address       [0x048] */
	unsigned char		dmapad3[3];
	volatile unsigned char	dma_addr3;	/* DMA address (LSB) [0x04c] */
};
/* DMA control bits */
#define CYBERII_DMA_LED 0x02 /* HD led control 1 = on */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_ints_off(struct NCR_ESP *esp);
static void dma_ints_on(struct NCR_ESP *esp);
static int dma_irq_p(struct NCR_ESP *esp);
static void dma_led_off(struct NCR_ESP *esp);
static void dma_led_on(struct NCR_ESP *esp);
static int dma_ports_p(struct NCR_ESP *esp);
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
static volatile unsigned char cmd_buffer[16];
/* This is where all commands are put
* before they are transferred to the ESP chip
* via PIO.
*/
/***************************************************************** Detection */
/* Probe for a CyberStorm Mk II SCSI host.  A register write/read-back
 * test distinguishes boards that actually carry the SCSI option.
 * Returns the number of ESP hosts put into use, or 0 on failure.
 */
int __init cyberII_esp_detect(struct scsi_host_template *tpnt)
{
	struct NCR_ESP *esp;
	struct zorro_dev *z = NULL;
	unsigned long address;
	struct ESP_regs *eregs;

	if ((z = zorro_find_device(ZORRO_PROD_PHASE5_CYBERSTORM_MK_II, z))) {
		unsigned long board = z->resource.start;
		if (request_mem_region(board+CYBERII_ESP_ADDR,
				       sizeof(struct ESP_regs), "NCR53C9x")) {
			/* Do some magic to figure out if the CyberStorm Mk II
			 * is equipped with a SCSI controller
			 */
			address = (unsigned long)ZTWO_VADDR(board);
			eregs = (struct ESP_regs *)(address + CYBERII_ESP_ADDR);

			esp = esp_allocate(tpnt, (void *)board + CYBERII_ESP_ADDR, 0);

			esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7));
			udelay(5);
			if(esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7)) {
				/* NOTE(review): esp->ehost is read after
				 * esp_deallocate(esp); confirm esp_deallocate()
				 * does not free the NCR_ESP structure.
				 */
				esp_deallocate(esp);
				scsi_unregister(esp->ehost);
				release_mem_region(board+CYBERII_ESP_ADDR,
						   sizeof(struct ESP_regs));
				return 0; /* Bail out if address did not hold data */
			}

			/* Do command transfer with programmed I/O */
			esp->do_pio_cmds = 1;

			/* Required functions */
			esp->dma_bytes_sent = &dma_bytes_sent;
			esp->dma_can_transfer = &dma_can_transfer;
			esp->dma_dump_state = &dma_dump_state;
			esp->dma_init_read = &dma_init_read;
			esp->dma_init_write = &dma_init_write;
			esp->dma_ints_off = &dma_ints_off;
			esp->dma_ints_on = &dma_ints_on;
			esp->dma_irq_p = &dma_irq_p;
			esp->dma_ports_p = &dma_ports_p;
			esp->dma_setup = &dma_setup;

			/* Optional functions (unused on this hardware) */
			esp->dma_barrier = 0;
			esp->dma_drain = 0;
			esp->dma_invalidate = 0;
			esp->dma_irq_entry = 0;
			esp->dma_irq_exit = 0;
			esp->dma_led_on = &dma_led_on;
			esp->dma_led_off = &dma_led_off;
			esp->dma_poll = 0;
			esp->dma_reset = 0;

			/* SCSI chip speed */
			esp->cfreq = 40000000;

			/* The DMA registers on the CyberStorm are mapped
			 * relative to the device (i.e. in the same Zorro
			 * I/O block).
			 */
			esp->dregs = (void *)(address + CYBERII_DMA_ADDR);

			/* ESP register base */
			esp->eregs = eregs;

			/* Set the command buffer */
			esp->esp_command = cmd_buffer;
			esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);

			esp->irq = IRQ_AMIGA_PORTS;
			/* NOTE(review): request_irq() return value is ignored. */
			request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
				    "CyberStorm SCSI Mk II", esp->ehost);

			/* Figure out our scsi ID on the bus */
			esp->scsi_id = 7;

			/* We don't have a differential SCSI-bus. */
			esp->diff = 0;

			esp_initialize(esp);

			printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
			esps_running = esps_in_use;
			return esps_in_use;
		}
	}
	return 0;
}
/************************************************************* DMA Functions */
/* Report how many bytes the DMA engine has already handed to the chip. */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
{
	/* Since the CyberStorm DMA is fully dedicated to the ESP chip,
	 * the number of bytes sent (to the ESP chip) equals the number
	 * of bytes in the FIFO - there is no buffering in the DMA controller.
	 * XXXX Do I read this right? It is from host to ESP, right?
	 */
	return fifo_count;
}

/* Largest chunk the core may program: capped by the ESP chip's 24-bit
 * transfer counter (16 MB); the DMA engine itself has no limit.
 */
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
	/* I don't think there's any limit on the CyberDMA. So we use what
	 * the ESP chip can handle (24 bit).
	 */
	unsigned long sz = sp->SCp.this_residual;
	if(sz > 0x1000000)
		sz = 0x1000000;
	return sz;
}

/* Debug helper: log the DMA condition register and the Amiga custom-chip
 * interrupt request/enable registers.
 */
static void dma_dump_state(struct NCR_ESP *esp)
{
	ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
		esp->esp_id, ((struct cyberII_dma_registers *)
			      (esp->dregs))->cond_reg));
	ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
		amiga_custom.intreqr, amiga_custom.intenar));
}
/* Program the DMA engine for a device-to-memory transfer.  Bit 0 of the
 * address selects the direction (cleared for reads); the Mk II needs no
 * control-register setup beyond the address bytes.
 */
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
{
	struct cyberII_dma_registers *dregs =
		(struct cyberII_dma_registers *) esp->dregs;

	/* Invalidate the target range before the device writes into it. */
	cache_clear(addr, length);

	addr &= ~(1);
	dregs->dma_addr0 = (addr >> 24) & 0xff;
	dregs->dma_addr1 = (addr >> 16) & 0xff;
	dregs->dma_addr2 = (addr >>  8) & 0xff;
	dregs->dma_addr3 = (addr      ) & 0xff;
}

/* Program the DMA engine for a memory-to-device transfer; bit 0 set
 * selects the write direction.
 */
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
{
	struct cyberII_dma_registers *dregs =
		(struct cyberII_dma_registers *) esp->dregs;

	/* Flush the source range so the device sees current data. */
	cache_push(addr, length);

	addr |= 1;
	dregs->dma_addr0 = (addr >> 24) & 0xff;
	dregs->dma_addr1 = (addr >> 16) & 0xff;
	dregs->dma_addr2 = (addr >>  8) & 0xff;
	dregs->dma_addr3 = (addr      ) & 0xff;
}
/* Mask the (shared) interrupt line while the core is in a critical
 * section.
 */
static void dma_ints_off(struct NCR_ESP *esp)
{
	disable_irq(esp->irq);
}

/* Re-enable the interrupt line. */
static void dma_ints_on(struct NCR_ESP *esp)
{
	enable_irq(esp->irq);
}

/* Is the pending interrupt ours?  Only the ESP status bit is checked on
 * the Mk II.
 */
static int dma_irq_p(struct NCR_ESP *esp)
{
	/* It's important to check the DMA IRQ bit in the correct way! */
	return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR);
}
/* Turn the HD activity LED off.
 *
 * Fix: the old code did a read-modify-write on ctrl_reg, but ctrl_reg
 * is an alias of cond_reg at the same offset — the location is
 * read-only as cond_reg and write-only as ctrl_reg (see the register
 * map above), so the read half of the RMW fetched DMA condition bits,
 * not the last control value.  CYBERII_DMA_LED is the only control bit
 * this driver ever writes, so store the full desired value instead.
 */
static void dma_led_off(struct NCR_ESP *esp)
{
	((struct cyberII_dma_registers *)(esp->dregs))->ctrl_reg = 0;
}

/* Turn the HD activity LED on; see dma_led_off() for why this is a
 * plain write rather than a read-modify-write.
 */
static void dma_led_on(struct NCR_ESP *esp)
{
	((struct cyberII_dma_registers *)(esp->dregs))->ctrl_reg =
		CYBERII_DMA_LED;
}
/* Nonzero iff the Amiga PORTS interrupt (our IRQ source) is enabled. */
static int dma_ports_p(struct NCR_ESP *esp)
{
	unsigned short enabled = amiga_custom.intenar;

	return enabled & IF_PORTS;
}

/* Dispatch a transfer setup.  Inherited Sparc convention: DMA_ST_WRITE
 * means "move data from device to memory", so a true 'write' argument
 * actually sets up a read.
 */
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
{
	if (!write)
		dma_init_write(esp, addr, count);
	else
		dma_init_read(esp, addr, count);
}
#define HOSTS_C

/* Tear down a CyberStorm Mk II host on module unload.
 *
 * Fix: free_irq() must be passed the same dev_id that request_irq() was
 * given — the detect routine registered the shared handler with
 * esp->ehost (i.e. this Scsi_Host) as dev_id, but the old code passed
 * the handler function instead, so the shared irqaction was never
 * matched and removed.
 *
 * NOTE(review): 'address' comes from esp->edev while detection reserved
 * board+CYBERII_ESP_ADDR; confirm edev holds that same address.
 */
int cyberII_esp_release(struct Scsi_Host *instance)
{
#ifdef MODULE
	unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;

	esp_deallocate((struct NCR_ESP *)instance->hostdata);
	esp_release();
	release_mem_region(address, sizeof(struct ESP_regs));
	free_irq(IRQ_AMIGA_PORTS, instance);
#endif
	return 1;
}
/* SCSI host template for the CyberStorm Mk II adapter; hooks the legacy
 * detect/release entry points into the old scsi_module.c glue included
 * below.
 */
static struct scsi_host_template driver_template = {
	.proc_name		= "esp-cyberstormII",
	.proc_info		= esp_proc_info,
	.name			= "CyberStorm Mk II SCSI",
	.detect			= cyberII_esp_detect,
	.slave_alloc		= esp_slave_alloc,
	.slave_destroy		= esp_slave_destroy,
	.release		= cyberII_esp_release,
	.queuecommand		= esp_queue,
	.eh_abort_handler	= esp_abort,
	.eh_bus_reset_handler	= esp_reset,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING
};
#include "scsi_module.c"
MODULE_LICENSE("GPL");

View file

@ -4267,7 +4267,7 @@ static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
int srb_idx = 0;
unsigned i = 0;
struct SGentry *ptr;
struct SGentry *uninitialized_var(ptr);
for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
acb->srb_array[i].segment_x = NULL;

View file

@ -1,687 +0,0 @@
/*
* dec_esp.c: Driver for SCSI chips on IOASIC based TURBOchannel DECstations
* and TURBOchannel PMAZ-A cards
*
* TURBOchannel changes by Harald Koerfgen
* PMAZ-A support by David Airlie
*
* based on jazz_esp.c:
* Copyright (C) 1997 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
*
* jazz_esp is based on David S. Miller's ESP driver and cyber_esp
*
* 20000819 - Small PMAZ-AA fixes by Florian Lohoff <flo@rfc822.org>
* Be warned the PMAZ-AA works currently as a single card.
* Dont try to put multiple cards in one machine - They are
* both detected but it may crash under high load garbling your
* data.
* 20001005 - Initialization fixes for 2.4.0-test9
* Florian Lohoff <flo@rfc822.org>
*
* Copyright (C) 2002, 2003, 2005, 2006 Maciej W. Rozycki
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/tc.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/dec/interrupts.h>
#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
#include <asm/dec/ioasic_ints.h>
#include <asm/dec/machtype.h>
#include <asm/dec/system.h>
#define DEC_SCSI_SREG 0
#define DEC_SCSI_DMAREG 0x40000
#define DEC_SCSI_SRAM 0x80000
#define DEC_SCSI_DIAG 0xC0000
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "NCR53C9x.h"
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static void dma_drain(struct NCR_ESP *esp);
static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd *sp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length);
static void dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length);
static void dma_ints_off(struct NCR_ESP *esp);
static void dma_ints_on(struct NCR_ESP *esp);
static int dma_irq_p(struct NCR_ESP *esp);
static int dma_ports_p(struct NCR_ESP *esp);
static void dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write);
static void dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp);
static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, struct scsi_cmnd * sp);
static void dma_advance_sg(struct scsi_cmnd * sp);
static void pmaz_dma_drain(struct NCR_ESP *esp);
static void pmaz_dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length);
static void pmaz_dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length);
static void pmaz_dma_ints_off(struct NCR_ESP *esp);
static void pmaz_dma_ints_on(struct NCR_ESP *esp);
static void pmaz_dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write);
static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp);
#define TC_ESP_RAM_SIZE 0x20000
#define ESP_TGT_DMA_SIZE ((TC_ESP_RAM_SIZE/7) & ~(sizeof(int)-1))
#define ESP_NCMD 7
#define TC_ESP_DMAR_MASK 0x1ffff
#define TC_ESP_DMAR_WRITE 0x80000000
#define TC_ESP_DMA_ADDR(x) ((unsigned)(x) & TC_ESP_DMAR_MASK)
u32 esp_virt_buffer;
int scsi_current_length;
volatile unsigned char cmd_buffer[16];
volatile unsigned char pmaz_cmd_buffer[16];
/* This is where all commands are put
* before they are trasfered to the ESP chip
* via PIO.
*/
static irqreturn_t scsi_dma_merr_int(int, void *);
static irqreturn_t scsi_dma_err_int(int, void *);
static irqreturn_t scsi_dma_int(int, void *);
/* SCSI host template shared by the IOASIC (onboard) and PMAZ-A
 * (TURBOchannel card) variants of the DECstation ESP.
 */
static struct scsi_host_template dec_esp_template = {
	.module			= THIS_MODULE,
	.name			= "NCR53C94",
	.info			= esp_info,
	.queuecommand		= esp_queue,
	.eh_abort_handler	= esp_abort,
	.eh_bus_reset_handler	= esp_reset,
	.slave_alloc		= esp_slave_alloc,
	.slave_destroy		= esp_slave_destroy,
	.proc_info		= esp_proc_info,
	.proc_name		= "dec_esp",
	.can_queue		= 7,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= 1,
	.use_clustering		= DISABLE_CLUSTERING,
};

/* The single onboard (IOASIC) host, if probed; used by the remove path. */
static struct NCR_ESP *dec_esp_platform;
/***************************************************************** Detection */
/* Probe and register the onboard IOASIC SCSI controller.
 * Acquires four interrupt lines (chip, memory error, overrun, DMA) and
 * unwinds them in reverse order on failure.  Returns 0 on success (or
 * when no IOASIC is present) and a negative errno otherwise.
 */
static int dec_esp_platform_probe(void)
{
	struct NCR_ESP *esp;
	int err = 0;

	if (IOASIC) {
		esp = esp_allocate(&dec_esp_template, NULL, 1);

		/* Do command transfer with programmed I/O */
		esp->do_pio_cmds = 1;

		/* Required functions */
		esp->dma_bytes_sent = &dma_bytes_sent;
		esp->dma_can_transfer = &dma_can_transfer;
		esp->dma_dump_state = &dma_dump_state;
		esp->dma_init_read = &dma_init_read;
		esp->dma_init_write = &dma_init_write;
		esp->dma_ints_off = &dma_ints_off;
		esp->dma_ints_on = &dma_ints_on;
		esp->dma_irq_p = &dma_irq_p;
		esp->dma_ports_p = &dma_ports_p;
		esp->dma_setup = &dma_setup;

		/* Optional functions */
		esp->dma_barrier = 0;
		esp->dma_drain = &dma_drain;
		esp->dma_invalidate = 0;
		esp->dma_irq_entry = 0;
		esp->dma_irq_exit = 0;
		esp->dma_poll = 0;
		esp->dma_reset = 0;
		esp->dma_led_off = 0;
		esp->dma_led_on = 0;

		/* virtual DMA functions */
		esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one;
		esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl;
		esp->dma_mmu_release_scsi_one = 0;
		esp->dma_mmu_release_scsi_sgl = 0;
		esp->dma_advance_sg = &dma_advance_sg;

		/* SCSI chip speed */
		esp->cfreq = 25000000;

		esp->dregs = 0;

		/* ESP register base */
		esp->eregs = (void *)CKSEG1ADDR(dec_kn_slot_base +
						IOASIC_SCSI);

		/* Set the command buffer */
		esp->esp_command = (volatile unsigned char *) cmd_buffer;

		/* get virtual dma address for command buffer */
		esp->esp_command_dvma = virt_to_phys(cmd_buffer);

		esp->irq = dec_interrupt[DEC_IRQ_ASC];

		esp->scsi_id = 7;

		/* Check for differential SCSI-bus */
		esp->diff = 0;

		err = request_irq(esp->irq, esp_intr, IRQF_DISABLED,
				  "ncr53c94", esp->ehost);
		if (err)
			goto err_alloc;
		err = request_irq(dec_interrupt[DEC_IRQ_ASC_MERR],
				  scsi_dma_merr_int, IRQF_DISABLED,
				  "ncr53c94 error", esp->ehost);
		if (err)
			goto err_irq;
		err = request_irq(dec_interrupt[DEC_IRQ_ASC_ERR],
				  scsi_dma_err_int, IRQF_DISABLED,
				  "ncr53c94 overrun", esp->ehost);
		if (err)
			goto err_irq_merr;
		err = request_irq(dec_interrupt[DEC_IRQ_ASC_DMA], scsi_dma_int,
				  IRQF_DISABLED, "ncr53c94 dma", esp->ehost);
		if (err)
			goto err_irq_err;

		esp_initialize(esp);

		err = scsi_add_host(esp->ehost, NULL);
		if (err) {
			printk(KERN_ERR "ESP: Unable to register adapter\n");
			goto err_irq_dma;
		}

		scsi_scan_host(esp->ehost);

		dec_esp_platform = esp;
	}

	return 0;

err_irq_dma:
	free_irq(dec_interrupt[DEC_IRQ_ASC_DMA], esp->ehost);
err_irq_err:
	free_irq(dec_interrupt[DEC_IRQ_ASC_ERR], esp->ehost);
err_irq_merr:
	free_irq(dec_interrupt[DEC_IRQ_ASC_MERR], esp->ehost);
err_irq:
	free_irq(esp->irq, esp->ehost);
err_alloc:
	esp_deallocate(esp);
	scsi_host_put(esp->ehost);
	return err;
}
/* Probe one PMAZ-A TURBOchannel card.  Reserves its MMIO window, sets up
 * the pmaz_* DMA callbacks (the card DMAs through on-board SRAM rather
 * than main memory), and registers the host.  Returns 0 on success or a
 * negative errno, unwinding IRQ/region/allocation in reverse order.
 */
static int __init dec_esp_probe(struct device *dev)
{
	struct NCR_ESP *esp;
	resource_size_t start, len;
	int err;

	esp = esp_allocate(&dec_esp_template, NULL, 1);

	dev_set_drvdata(dev, esp);

	start = to_tc_dev(dev)->resource.start;
	len = to_tc_dev(dev)->resource.end - start + 1;

	if (!request_mem_region(start, len, dev->bus_id)) {
		printk(KERN_ERR "%s: Unable to reserve MMIO resource\n",
		       dev->bus_id);
		err = -EBUSY;
		goto err_alloc;
	}

	/* Store base addr into esp struct. */
	esp->slot = start;

	esp->dregs = 0;
	esp->eregs = (void *)CKSEG1ADDR(start + DEC_SCSI_SREG);
	esp->do_pio_cmds = 1;

	/* Set the command buffer. */
	esp->esp_command = (volatile unsigned char *)pmaz_cmd_buffer;

	/* Get virtual dma address for command buffer. */
	esp->esp_command_dvma = virt_to_phys(pmaz_cmd_buffer);

	esp->cfreq = tc_get_speed(to_tc_dev(dev)->bus);

	esp->irq = to_tc_dev(dev)->interrupt;

	/* Required functions. */
	esp->dma_bytes_sent = &dma_bytes_sent;
	esp->dma_can_transfer = &dma_can_transfer;
	esp->dma_dump_state = &dma_dump_state;
	esp->dma_init_read = &pmaz_dma_init_read;
	esp->dma_init_write = &pmaz_dma_init_write;
	esp->dma_ints_off = &pmaz_dma_ints_off;
	esp->dma_ints_on = &pmaz_dma_ints_on;
	esp->dma_irq_p = &dma_irq_p;
	esp->dma_ports_p = &dma_ports_p;
	esp->dma_setup = &pmaz_dma_setup;

	/* Optional functions. */
	esp->dma_barrier = 0;
	esp->dma_drain = &pmaz_dma_drain;
	esp->dma_invalidate = 0;
	esp->dma_irq_entry = 0;
	esp->dma_irq_exit = 0;
	esp->dma_poll = 0;
	esp->dma_reset = 0;
	esp->dma_led_off = 0;
	esp->dma_led_on = 0;

	esp->dma_mmu_get_scsi_one = pmaz_dma_mmu_get_scsi_one;
	esp->dma_mmu_get_scsi_sgl = 0;
	esp->dma_mmu_release_scsi_one = 0;
	esp->dma_mmu_release_scsi_sgl = 0;
	esp->dma_advance_sg = 0;

	err = request_irq(esp->irq, esp_intr, IRQF_DISABLED, "PMAZ_AA",
			  esp->ehost);
	if (err) {
		printk(KERN_ERR "%s: Unable to get IRQ %d\n",
		       dev->bus_id, esp->irq);
		goto err_resource;
	}

	esp->scsi_id = 7;
	esp->diff = 0;
	esp_initialize(esp);

	err = scsi_add_host(esp->ehost, dev);
	if (err) {
		printk(KERN_ERR "%s: Unable to register adapter\n",
		       dev->bus_id);
		goto err_irq;
	}

	scsi_scan_host(esp->ehost);

	return 0;

err_irq:
	free_irq(esp->irq, esp->ehost);
err_resource:
	release_mem_region(start, len);
err_alloc:
	esp_deallocate(esp);
	scsi_host_put(esp->ehost);
	return err;
}
/* Unregister the onboard IOASIC host on module unload.
 * NOTE(review): only the chip IRQ is freed here; the MERR/ERR/DMA lines
 * taken in dec_esp_platform_probe() are apparently not released —
 * confirm whether that was intentional for this onboard device.
 */
static void __exit dec_esp_platform_remove(void)
{
	struct NCR_ESP *esp = dec_esp_platform;

	free_irq(esp->irq, esp->ehost);
	esp_deallocate(esp);
	scsi_host_put(esp->ehost);
	dec_esp_platform = NULL;
}

/* Unregister a PMAZ-A TURBOchannel host. */
static void __exit dec_esp_remove(struct device *dev)
{
	struct NCR_ESP *esp = dev_get_drvdata(dev);

	free_irq(esp->irq, esp->ehost);
	esp_deallocate(esp);
	scsi_host_put(esp->ehost);
}
/************************************************************* DMA Functions */
/* IOASIC SCSI DMA memory-read-error interrupt: nothing to recover, just
 * log the event.  (Console output is byte-identical to the previous
 * three-part printk.)
 */
static irqreturn_t scsi_dma_merr_int(int irq, void *dev_id)
{
	printk("Got unexpected SCSI DMA Interrupt! < SCSI_DMA_MEMRDERR >\n");

	return IRQ_HANDLED;
}

/* IOASIC SCSI DMA overrun interrupt: deliberately ignored. */
static irqreturn_t scsi_dma_err_int(int irq, void *dev_id)
{
	/* empty */

	return IRQ_HANDLED;
}
/* IOASIC SCSI DMA page-boundary interrupt: the engine crossed into the
 * next page, so pre-load the breakpoint register with the page after
 * the current pointer.  Pointers are stored left-shifted by 3 in the
 * IOASIC registers.
 */
static irqreturn_t scsi_dma_int(int irq, void *dev_id)
{
	u32 scsi_next_ptr;

	scsi_next_ptr = ioasic_read(IO_REG_SCSI_DMA_P);

	/* next page */
	scsi_next_ptr = (((scsi_next_ptr >> 3) + PAGE_SIZE) & PAGE_MASK) << 3;
	ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);
	fast_iob();

	return IRQ_HANDLED;
}
/* Bytes already handed to the chip equal the FIFO count — no extra
 * buffering in the IOASIC path.
 */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
{
	return fifo_count;
}

/* Flush any residual halfwords (up to 3) left in the IOASIC SCSI data
 * registers out to memory at the current DMA pointer.  The SDR reads
 * are side-effecting hardware accesses, hence the explicit per-count
 * cases rather than a loop.
 */
static void dma_drain(struct NCR_ESP *esp)
{
	u32 nw, data0, data1, scsi_data_ptr;
	u16 *p;

	nw = ioasic_read(IO_REG_SCSI_SCR);

	/*
	 * Is there something in the dma buffers left?
	 */
	if (nw) {
		scsi_data_ptr = ioasic_read(IO_REG_SCSI_DMA_P) >> 3;
		p = phys_to_virt(scsi_data_ptr);
		switch (nw) {
		case 1:
			data0 = ioasic_read(IO_REG_SCSI_SDR0);
			p[0] = data0 & 0xffff;
			break;
		case 2:
			data0 = ioasic_read(IO_REG_SCSI_SDR0);
			p[0] = data0 & 0xffff;
			p[1] = (data0 >> 16) & 0xffff;
			break;
		case 3:
			data0 = ioasic_read(IO_REG_SCSI_SDR0);
			data1 = ioasic_read(IO_REG_SCSI_SDR1);
			p[0] = data0 & 0xffff;
			p[1] = (data0 >> 16) & 0xffff;
			p[2] = data1 & 0xffff;
			break;
		default:
			printk("Strange: %d words in dma buffer left\n", nw);
			break;
		}
	}
}

/* No IOASIC-side transfer limit: let the core move the whole residual. */
static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd * sp)
{
	return sp->SCp.this_residual;
}

/* No DMA state worth dumping on this hardware. */
static void dma_dump_state(struct NCR_ESP *esp)
{
}
/* Program the IOASIC for a device-to-memory transfer.  The engine must
 * be disabled before the pointer registers are touched and re-enabled
 * afterwards; ioasic_ssr_lock serializes SSR updates and the fast_*
 * barriers order the MMIO writes.
 */
static void dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length)
{
	u32 scsi_next_ptr, ioasic_ssr;
	unsigned long flags;

	if (vaddress & 3)
		panic("dec_esp.c: unable to handle partial word transfers, yet...");

	dma_cache_wback_inv((unsigned long) phys_to_virt(vaddress), length);

	spin_lock_irqsave(&ioasic_ssr_lock, flags);

	fast_mb();
	ioasic_ssr = ioasic_read(IO_REG_SSR);

	ioasic_ssr &= ~IO_SSR_SCSI_DMA_EN;
	ioasic_write(IO_REG_SSR, ioasic_ssr);

	fast_wmb();
	ioasic_write(IO_REG_SCSI_SCR, 0);
	ioasic_write(IO_REG_SCSI_DMA_P, vaddress << 3);

	/* prepare for next page */
	scsi_next_ptr = ((vaddress + PAGE_SIZE) & PAGE_MASK) << 3;
	ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);

	ioasic_ssr |= (IO_SSR_SCSI_DMA_DIR | IO_SSR_SCSI_DMA_EN);
	fast_wmb();
	ioasic_write(IO_REG_SSR, ioasic_ssr);

	fast_iob();
	spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
}

/* Program the IOASIC for a memory-to-device transfer; same disable/
 * program/re-enable dance as dma_init_read() with the direction bit
 * cleared.
 */
static void dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length)
{
	u32 scsi_next_ptr, ioasic_ssr;
	unsigned long flags;

	if (vaddress & 3)
		panic("dec_esp.c: unable to handle partial word transfers, yet...");

	dma_cache_wback_inv((unsigned long) phys_to_virt(vaddress), length);

	spin_lock_irqsave(&ioasic_ssr_lock, flags);

	fast_mb();
	ioasic_ssr = ioasic_read(IO_REG_SSR);

	ioasic_ssr &= ~(IO_SSR_SCSI_DMA_DIR | IO_SSR_SCSI_DMA_EN);
	ioasic_write(IO_REG_SSR, ioasic_ssr);

	fast_wmb();
	ioasic_write(IO_REG_SCSI_SCR, 0);
	ioasic_write(IO_REG_SCSI_DMA_P, vaddress << 3);

	/* prepare for next page */
	scsi_next_ptr = ((vaddress + PAGE_SIZE) & PAGE_MASK) << 3;
	ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);

	ioasic_ssr |= IO_SSR_SCSI_DMA_EN;
	fast_wmb();
	ioasic_write(IO_REG_SSR, ioasic_ssr);

	fast_iob();
	spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
}
/* Mask the SCSI DMA page interrupt. */
static void dma_ints_off(struct NCR_ESP *esp)
{
	disable_irq(dec_interrupt[DEC_IRQ_ASC_DMA]);
}

/* Re-enable the SCSI DMA page interrupt. */
static void dma_ints_on(struct NCR_ESP *esp)
{
	enable_irq(dec_interrupt[DEC_IRQ_ASC_DMA]);
}

/* Is the chip interrupt asserted?
 * NOTE(review): reads esp_status directly rather than through
 * esp_read() as the other NCR53C9x front-ends do — presumably fine for
 * the memory-mapped DECstation registers; confirm.
 */
static int dma_irq_p(struct NCR_ESP *esp)
{
	return (esp->eregs->esp_status & ESP_STAT_INTR);
}

static int dma_ports_p(struct NCR_ESP *esp)
{
	/*
	 * FIXME: what's this good for?
	 */
	return 1;
}
/* Dispatch a transfer setup.  Inherited convention: DMA_ST_WRITE means
 * "move data from device to memory", so a true 'write' argument
 * actually sets up a read.
 */
static void dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write)
{
	if (!write)
		dma_init_write(esp, addr, count);
	else
		dma_init_read(esp, addr, count);
}
/* Single-buffer command: hand the core the physical address. */
static void dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp)
{
	sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer);
}

/* Fill in a physical dma_address for every scatterlist entry of the
 * command, then point SCp.ptr at the current one.
 * NOTE(review): the loop runs while sz >= 0, i.e. it touches
 * buffers_residual + 1 entries — presumably buffers_residual excludes
 * the current buffer; confirm against the ESP core's accounting.
 */
static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, struct scsi_cmnd * sp)
{
	int sz = sp->SCp.buffers_residual;
	struct scatterlist *sg = sp->SCp.buffer;

	while (sz >= 0) {
		sg[sz].dma_address = page_to_phys(sg[sz].page) + sg[sz].offset;
		sz--;
	}
	sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address);
}

/* Advance to the (pre-computed) physical address of the next sg entry. */
static void dma_advance_sg(struct scsi_cmnd * sp)
{
	sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address);
}
/* PMAZ-A read path completion: copy the data the card DMA'd into its
 * on-board SRAM back to the buffer recorded by pmaz_dma_init_read().
 */
static void pmaz_dma_drain(struct NCR_ESP *esp)
{
	memcpy(phys_to_virt(esp_virt_buffer),
	       (void *)CKSEG1ADDR(esp->slot + DEC_SCSI_SRAM +
				  ESP_TGT_DMA_SIZE),
	       scsi_current_length);
}

/* Set up a device-to-SRAM transfer; the final SRAM-to-memory copy
 * happens later in pmaz_dma_drain(), so remember buffer and length in
 * the globals.
 */
static void pmaz_dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length)
{
	volatile u32 *dmareg =
		(volatile u32 *)CKSEG1ADDR(esp->slot + DEC_SCSI_DMAREG);

	if (length > ESP_TGT_DMA_SIZE)
		length = ESP_TGT_DMA_SIZE;

	*dmareg = TC_ESP_DMA_ADDR(ESP_TGT_DMA_SIZE);

	iob();

	esp_virt_buffer = vaddress;
	scsi_current_length = length;
}

/* Memory-to-device transfer: copy the payload into the card's SRAM
 * first, then start the on-card DMA in the write direction.
 */
static void pmaz_dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length)
{
	volatile u32 *dmareg =
		(volatile u32 *)CKSEG1ADDR(esp->slot + DEC_SCSI_DMAREG);

	memcpy((void *)CKSEG1ADDR(esp->slot + DEC_SCSI_SRAM +
				  ESP_TGT_DMA_SIZE),
	       phys_to_virt(vaddress), length);

	wmb();
	*dmareg = TC_ESP_DMAR_WRITE | TC_ESP_DMA_ADDR(ESP_TGT_DMA_SIZE);

	iob();
}
static void pmaz_dma_ints_off(struct NCR_ESP *esp)
{
}
static void pmaz_dma_ints_on(struct NCR_ESP *esp)
{
}
/*
 * Program a DMA transfer on the PMAZ-AA TURBOchannel card.
 *
 * Mind the inverted terminology: DMA_ST_WRITE means "device to memory",
 * so a 'write' request is actually started as a DMA read and vice versa.
 */
static void pmaz_dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write)
{
	if (!write) {
		pmaz_dma_init_write(esp, addr, count);
		return;
	}
	pmaz_dma_init_read(esp, addr, count);
}
/*
 * Single-buffer command on the PMAZ-AA: record the buffer's physical
 * address in SCp.ptr for the DMA setup path.
 */
static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp)
{
	sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer);
}
#ifdef CONFIG_TC
static int __init dec_esp_tc_probe(struct device *dev);
static int __exit dec_esp_tc_remove(struct device *dev);
/* TURBOchannel ID table: match DEC's PMAZ-AA SCSI option card. */
static const struct tc_device_id dec_esp_tc_table[] = {
	{ "DEC     ", "PMAZ-AA " },
	{ }
};

MODULE_DEVICE_TABLE(tc, dec_esp_tc_table);
/* TURBOchannel driver glue for hot-pluggable PMAZ-AA boards. */
static struct tc_driver dec_esp_tc_driver = {
	.id_table	= dec_esp_tc_table,
	.driver		= {
		.name	= "dec_esp",
		.bus	= &tc_bus_type,
		.probe	= dec_esp_tc_probe,
		.remove	= __exit_p(dec_esp_tc_remove),
	},
};
/*
 * TURBOchannel probe wrapper: on success pin the device with a reference
 * (dropped again in dec_esp_tc_remove()).  Returns dec_esp_probe()'s
 * status, 0 on success.
 */
static int __init dec_esp_tc_probe(struct device *dev)
{
	int status = dec_esp_probe(dev);
	if (!status)
		get_device(dev);
	return status;
}
/*
 * TURBOchannel remove wrapper: tear the controller down, then drop the
 * reference taken in dec_esp_tc_probe().
 *
 * The teardown must happen *before* put_device(): dropping the last
 * reference first could free the device and leave dec_esp_remove()
 * operating on a dangling pointer.
 */
static int __exit dec_esp_tc_remove(struct device *dev)
{
	dec_esp_remove(dev);
	put_device(dev);
	return 0;
}
#endif
/*
 * Module init: register the TURBOchannel driver and probe any
 * platform (IOASIC) ESP as well, then report how many hosts were
 * found.  Returns the tc_register_driver() status.
 */
static int __init dec_esp_init(void)
{
	int status;

	status = tc_register_driver(&dec_esp_tc_driver);
	if (!status)
		dec_esp_platform_probe();

	if (nesps) {
		pr_info("ESP: Total of %d ESP hosts found, "
			"%d actually in use.\n", nesps, esps_in_use);
		esps_running = esps_in_use;
	}

	return status;
}
/* Module exit: undo dec_esp_init() — platform host first, then the TC driver. */
static void __exit dec_esp_exit(void)
{
	dec_esp_platform_remove();
	tc_unregister_driver(&dec_esp_tc_driver);
}
module_init(dec_esp_init);
module_exit(dec_esp_exit);

View file

@ -1,421 +0,0 @@
/* fastlane.c: Driver for Phase5's Fastlane SCSI Controller.
*
* Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
*
* This driver is based on the CyberStorm driver, hence the occasional
* reference to CyberStorm.
*
* Betatesting & crucial adjustments by
* Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)
*
*/
/* TODO:
*
* o According to the doc from laire, it is required to reset the DMA when
* the transfer is done. ATM we reset DMA just before every new
* dma_init_(read|write).
*
* 1) Figure out how to make a cleaner merge with the sparc driver with regard
* to the caches and the Sparc MMU mapping.
* 2) Make as few routines required outside the generic driver. A lot of the
* routines in this file used to be inline!
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/interrupt.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "NCR53C9x.h"
#include <linux/zorro.h>
#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <asm/pgtable.h>
/* Such day has just come... */
#if 0
/* Let this defined unless you really need to enable DMA IRQ one day */
#define NODMAIRQ
#endif
/* The controller registers can be found in the Z2 config area at these
* offsets:
*/
#define FASTLANE_ESP_ADDR 0x1000001
#define FASTLANE_DMA_ADDR 0x1000041
/* The Fastlane DMA interface */
struct fastlane_dma_registers {
volatile unsigned char cond_reg; /* DMA status (ro) [0x0000] */
#define ctrl_reg cond_reg /* DMA control (wo) [0x0000] */
unsigned char dmapad1[0x3f];
volatile unsigned char clear_strobe; /* DMA clear (wo) [0x0040] */
};
/* DMA status bits */
#define FASTLANE_DMA_MINT 0x80
#define FASTLANE_DMA_IACT 0x40
#define FASTLANE_DMA_CREQ 0x20
/* DMA control bits */
#define FASTLANE_DMA_FCODE 0xa0
#define FASTLANE_DMA_MASK 0xf3
#define FASTLANE_DMA_LED 0x10 /* HD led control 1 = on */
#define FASTLANE_DMA_WRITE 0x08 /* 1 = write */
#define FASTLANE_DMA_ENABLE 0x04 /* Enable DMA */
#define FASTLANE_DMA_EDI 0x02 /* Enable DMA IRQ ? */
#define FASTLANE_DMA_ESI 0x01 /* Enable SCSI IRQ */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
static void dma_init_write(struct NCR_ESP *esp, __u32 vaddr, int length);
static void dma_ints_off(struct NCR_ESP *esp);
static void dma_ints_on(struct NCR_ESP *esp);
static int dma_irq_p(struct NCR_ESP *esp);
static void dma_irq_exit(struct NCR_ESP *esp);
static void dma_led_off(struct NCR_ESP *esp);
static void dma_led_on(struct NCR_ESP *esp);
static int dma_ports_p(struct NCR_ESP *esp);
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
static unsigned char ctrl_data = 0; /* Keep backup of the stuff written
* to ctrl_reg. Always write a copy
* to this register when writing to
* the hardware register!
*/
static volatile unsigned char cmd_buffer[16];
/* This is where all commands are put
* before they are transferred to the ESP chip
* via PIO.
*/
/*
 * Reset the Fastlane DMA engine: clear the enable/direction bits in the
 * shadowed control register, strobe the clear register, and zero the
 * address latch at the board base (esp->edev).  The exact MMIO sequence
 * matters — do not reorder.
 */
static inline void dma_clear(struct NCR_ESP *esp)
{
	struct fastlane_dma_registers *dregs =
		(struct fastlane_dma_registers *) (esp->dregs);
	unsigned long *t;

	ctrl_data = (ctrl_data & FASTLANE_DMA_MASK);
	dregs->ctrl_reg = ctrl_data;

	t = (unsigned long *)(esp->edev);

	dregs->clear_strobe = 0;
	*t = 0 ;
}
/***************************************************************** Detection */
/*
 * Probe for a Fastlane Z3 SCSI controller on the Zorro bus, allocate and
 * initialize an NCR_ESP host for it.
 *
 * Returns the number of hosts in use (0 if none found or on error).
 * Error paths unwind via gotos in reverse order of acquisition:
 * IRQ -> ioremap -> host registration -> mem region.
 *
 * NOTE(review): the shared Zorro product ID also matches CyberStorm and
 * Blizzard boards; only the Fastlane maps into Z3 space, hence the
 * board < 0x1000000 rejection below.
 */
int __init fastlane_esp_detect(struct scsi_host_template *tpnt)
{
	struct NCR_ESP *esp;
	struct zorro_dev *z = NULL;
	unsigned long address;

	if ((z = zorro_find_device(ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060, z))) {
	    unsigned long board = z->resource.start;
	    if (request_mem_region(board+FASTLANE_ESP_ADDR,
				   sizeof(struct ESP_regs), "NCR53C9x")) {
		/* Check if this is really a fastlane controller. The problem
		 * is that also the cyberstorm and blizzard controllers use
		 * this ID value. Fortunately only Fastlane maps in Z3 space
		 */
		if (board < 0x1000000) {
			goto err_release;
		}
		esp = esp_allocate(tpnt, (void *)board + FASTLANE_ESP_ADDR, 0);

		/* Do command transfer with programmed I/O */
		esp->do_pio_cmds = 1;

		/* Required functions */
		esp->dma_bytes_sent = &dma_bytes_sent;
		esp->dma_can_transfer = &dma_can_transfer;
		esp->dma_dump_state = &dma_dump_state;
		esp->dma_init_read = &dma_init_read;
		esp->dma_init_write = &dma_init_write;
		esp->dma_ints_off = &dma_ints_off;
		esp->dma_ints_on = &dma_ints_on;
		esp->dma_irq_p = &dma_irq_p;
		esp->dma_ports_p = &dma_ports_p;
		esp->dma_setup = &dma_setup;

		/* Optional functions */
		esp->dma_barrier = 0;
		esp->dma_drain = 0;
		esp->dma_invalidate = 0;
		esp->dma_irq_entry = 0;
		esp->dma_irq_exit = &dma_irq_exit;
		esp->dma_led_on = &dma_led_on;
		esp->dma_led_off = &dma_led_off;
		esp->dma_poll = 0;
		esp->dma_reset = 0;

		/* Initialize the portBits (enable IRQs) */
		ctrl_data = (FASTLANE_DMA_FCODE |
#ifndef NODMAIRQ
			     FASTLANE_DMA_EDI |
#endif
			     FASTLANE_DMA_ESI);

		/* SCSI chip clock */
		esp->cfreq = 40000000;

		/* Map the physical address space into virtual kernel space */
		address = (unsigned long)
			z_ioremap(board, z->resource.end-board+1);

		if(!address){
			printk("Could not remap Fastlane controller memory!");
			goto err_unregister;
		}

		/* The DMA registers on the Fastlane are mapped
		 * relative to the device (i.e. in the same Zorro
		 * I/O block).
		 */
		esp->dregs = (void *)(address + FASTLANE_DMA_ADDR);

		/* ESP register base */
		esp->eregs = (struct ESP_regs *)(address + FASTLANE_ESP_ADDR);

		/* Board base */
		esp->edev = (void *) address;

		/* Set the command buffer */
		esp->esp_command = cmd_buffer;
		esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);

		esp->irq = IRQ_AMIGA_PORTS;
		esp->slot = board+FASTLANE_ESP_ADDR;
		if (request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
				"Fastlane SCSI", esp->ehost)) {
			printk(KERN_WARNING "Fastlane: Could not get IRQ%d, aborting.\n", IRQ_AMIGA_PORTS);
			goto err_unmap;
		}

		/* Controller ID */
		esp->scsi_id = 7;

		/* We don't have a differential SCSI-bus. */
		esp->diff = 0;

		dma_clear(esp);
		esp_initialize(esp);

		printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
		esps_running = esps_in_use;
		return esps_in_use;
	    }
	}
	return 0;

 err_unmap:
	z_iounmap((void *)address);
 err_unregister:
	scsi_unregister (esp->ehost);
 err_release:
	release_mem_region(z->resource.start+FASTLANE_ESP_ADDR,
			   sizeof(struct ESP_regs));
	return 0;
}
/************************************************************* DMA Functions */
/*
 * Report how many bytes the DMA engine has passed to the ESP chip.
 * The Fastlane DMA has no internal buffering, so this is simply the
 * FIFO count supplied by the core.
 */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
{
	/* Since the Fastlane DMA is fully dedicated to the ESP chip,
	 * the number of bytes sent (to the ESP chip) equals the number
	 * of bytes in the FIFO - there is no buffering in the DMA controller.
	 * XXXX Do I read this right? It is from host to ESP, right?
	 */
	return fifo_count;
}
/*
 * Largest chunk the Fastlane can move in one DMA run: the remaining
 * residual of the current segment, capped at 0xfffc bytes (the DMA
 * engine's per-transfer limit).
 */
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
	const unsigned long limit = 0xfffc;
	unsigned long residual = sp->SCp.this_residual;

	return (residual > limit) ? limit : residual;
}
/* Debug helper: log the DMA condition register and Amiga interrupt state. */
static void dma_dump_state(struct NCR_ESP *esp)
{
	ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
		esp->esp_id, ((struct fastlane_dma_registers *)
			      (esp->dregs))->cond_reg));
	ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
		amiga_custom.intreqr, amiga_custom.intenar));
}
/*
 * Start a device->memory DMA transfer: invalidate the destination range
 * from the caches, reset the engine, latch the target address via the
 * board's address window, then enable DMA.  MMIO order is significant.
 */
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
{
	struct fastlane_dma_registers *dregs =
		(struct fastlane_dma_registers *) (esp->dregs);
	unsigned long *t;

	cache_clear(addr, length);

	dma_clear(esp);

	t = (unsigned long *)((addr & 0x00ffffff) + esp->edev);

	dregs->clear_strobe = 0;
	*t = addr;

	ctrl_data = (ctrl_data & FASTLANE_DMA_MASK) | FASTLANE_DMA_ENABLE;
	dregs->ctrl_reg = ctrl_data;
}
/*
 * Start a memory->device DMA transfer: flush the source range from the
 * caches, reset the engine, latch the source address, then enable DMA
 * with the WRITE direction bit set.  MMIO order is significant.
 */
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
{
	struct fastlane_dma_registers *dregs =
		(struct fastlane_dma_registers *) (esp->dregs);
	unsigned long *t;

	cache_push(addr, length);

	dma_clear(esp);

	t = (unsigned long *)((addr & 0x00ffffff) + (esp->edev));

	dregs->clear_strobe = 0;
	*t = addr;

	ctrl_data = ((ctrl_data & FASTLANE_DMA_MASK) |
		     FASTLANE_DMA_ENABLE |
		     FASTLANE_DMA_WRITE);
	dregs->ctrl_reg = ctrl_data;
}
/* Mask the board's interrupt line at the interrupt controller. */
static void dma_ints_off(struct NCR_ESP *esp)
{
	disable_irq(esp->irq);
}
/* Unmask the board's interrupt line at the interrupt controller. */
static void dma_ints_on(struct NCR_ESP *esp)
{
	enable_irq(esp->irq);
}
/*
 * Acknowledge/re-arm the board IRQ on interrupt exit: briefly drop the
 * EDI/ESI enable bits, then restore the shadowed control value.  The
 * nop() gives the hardware a settle cycle on m68k between the writes.
 */
static void dma_irq_exit(struct NCR_ESP *esp)
{
	struct fastlane_dma_registers *dregs =
		(struct fastlane_dma_registers *) (esp->dregs);

	dregs->ctrl_reg = ctrl_data & ~(FASTLANE_DMA_EDI|FASTLANE_DMA_ESI);
#ifdef __mc68000__
	nop();
#endif
	dregs->ctrl_reg = ctrl_data;
}
/*
 * Interrupt ownership test: return non-zero only if this board raised
 * the (shared) IRQ.  IACT set means the interrupt is not ours; otherwise
 * require the ESP chip itself to be flagging an interrupt (and, unless
 * NODMAIRQ, the DMA CREQ bit).
 */
static int dma_irq_p(struct NCR_ESP *esp)
{
	struct fastlane_dma_registers *dregs =
		(struct fastlane_dma_registers *) (esp->dregs);
	unsigned char dma_status;

	dma_status = dregs->cond_reg;

	if(dma_status & FASTLANE_DMA_IACT)
		return 0;	/* not our IRQ */

	/* Return non-zero if ESP requested IRQ */
	return (
#ifndef NODMAIRQ
	   (dma_status & FASTLANE_DMA_CREQ) &&
#endif
	   (!(dma_status & FASTLANE_DMA_MINT)) &&
	   (esp_read(((struct ESP_regs *) (esp->eregs))->esp_status) & ESP_STAT_INTR));
}
/* Turn the HD activity LED off (clear the LED bit in the shadowed ctrl reg). */
static void dma_led_off(struct NCR_ESP *esp)
{
	ctrl_data &= ~FASTLANE_DMA_LED;
	((struct fastlane_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
}
/* Turn the HD activity LED on (set the LED bit in the shadowed ctrl reg). */
static void dma_led_on(struct NCR_ESP *esp)
{
	ctrl_data |= FASTLANE_DMA_LED;
	((struct fastlane_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
}
/* Non-zero iff the Amiga PORTS interrupt (our IRQ source) is enabled. */
static int dma_ports_p(struct NCR_ESP *esp)
{
	return ((amiga_custom.intenar) & IF_PORTS);
}
/*
 * Program a DMA transfer on the Fastlane.
 *
 * As on the Sparc, DMA_ST_WRITE means "move data from device to memory",
 * so a 'write' request is actually started as a DMA read and vice versa.
 */
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
{
	if (!write) {
		dma_init_write(esp, addr, count);
		return;
	}
	dma_init_read(esp, addr, count);
}
#define HOSTS_C
/*
 * Release a Fastlane host on module unload: free the ESP structure,
 * release the driver core, the I/O memory region and the shared IRQ.
 * Only compiled-in for modular builds.  Always returns 1.
 */
int fastlane_esp_release(struct Scsi_Host *instance)
{
#ifdef MODULE
	unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
	esp_deallocate((struct NCR_ESP *)instance->hostdata);
	esp_release();
	release_mem_region(address, sizeof(struct ESP_regs));
	free_irq(IRQ_AMIGA_PORTS, esp_intr);
#endif
	return 1;
}
/* SCSI host template for the Fastlane; handlers live in the generic ESP core. */
static struct scsi_host_template driver_template = {
	.proc_name		= "esp-fastlane",
	.proc_info		= esp_proc_info,
	.name			= "Fastlane SCSI",
	.detect			= fastlane_esp_detect,
	.slave_alloc		= esp_slave_alloc,
	.slave_destroy		= esp_slave_destroy,
	.release		= fastlane_esp_release,
	.queuecommand		= esp_queue,
	.eh_abort_handler	= esp_abort,
	.eh_bus_reset_handler	= esp_reset,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING
};
#include "scsi_module.c"
MODULE_LICENSE("GPL");

View file

@ -629,8 +629,9 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
int rc;
if (tcp_conn->in.datalen) {
printk(KERN_ERR "iscsi_tcp: invalid R2t with datalen %d\n",
tcp_conn->in.datalen);
iscsi_conn_printk(KERN_ERR, conn,
"invalid R2t with datalen %d\n",
tcp_conn->in.datalen);
return ISCSI_ERR_DATALEN;
}
@ -644,8 +645,9 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
"recovery...\n", ctask->itt);
iscsi_conn_printk(KERN_INFO, conn,
"dropping R2T itt %d in recovery.\n",
ctask->itt);
return 0;
}
@ -655,7 +657,8 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
r2t->exp_statsn = rhdr->statsn;
r2t->data_length = be32_to_cpu(rhdr->data_length);
if (r2t->data_length == 0) {
printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n");
iscsi_conn_printk(KERN_ERR, conn,
"invalid R2T with zero data len\n");
__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
sizeof(void*));
return ISCSI_ERR_DATALEN;
@ -668,9 +671,10 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
r2t->data_offset = be32_to_cpu(rhdr->data_offset);
if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
"offset %u and total length %d\n", r2t->data_length,
r2t->data_offset, scsi_bufflen(ctask->sc));
iscsi_conn_printk(KERN_ERR, conn,
"invalid R2T with data len %u at offset %u "
"and total length %d\n", r2t->data_length,
r2t->data_offset, scsi_bufflen(ctask->sc));
__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
sizeof(void*));
return ISCSI_ERR_DATALEN;
@ -736,8 +740,9 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
/* verify PDU length */
tcp_conn->in.datalen = ntoh24(hdr->dlength);
if (tcp_conn->in.datalen > conn->max_recv_dlength) {
printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n",
tcp_conn->in.datalen, conn->max_recv_dlength);
iscsi_conn_printk(KERN_ERR, conn,
"iscsi_tcp: datalen %d > %d\n",
tcp_conn->in.datalen, conn->max_recv_dlength);
return ISCSI_ERR_DATALEN;
}
@ -819,10 +824,12 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
* For now we fail until we find a vendor that needs it
*/
if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
"but conn buffer is only %u (opcode %0x)\n",
tcp_conn->in.datalen,
ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
iscsi_conn_printk(KERN_ERR, conn,
"iscsi_tcp: received buffer of "
"len %u but conn buffer is only %u "
"(opcode %0x)\n",
tcp_conn->in.datalen,
ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
rc = ISCSI_ERR_PROTO;
break;
}
@ -1496,30 +1503,25 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
CRYPTO_ALG_ASYNC);
tcp_conn->tx_hash.flags = 0;
if (IS_ERR(tcp_conn->tx_hash.tfm)) {
printk(KERN_ERR "Could not create connection due to crc32c "
"loading error %ld. Make sure the crc32c module is "
"built as a module or into the kernel\n",
PTR_ERR(tcp_conn->tx_hash.tfm));
if (IS_ERR(tcp_conn->tx_hash.tfm))
goto free_tcp_conn;
}
tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
CRYPTO_ALG_ASYNC);
tcp_conn->rx_hash.flags = 0;
if (IS_ERR(tcp_conn->rx_hash.tfm)) {
printk(KERN_ERR "Could not create connection due to crc32c "
"loading error %ld. Make sure the crc32c module is "
"built as a module or into the kernel\n",
PTR_ERR(tcp_conn->rx_hash.tfm));
if (IS_ERR(tcp_conn->rx_hash.tfm))
goto free_tx_tfm;
}
return cls_conn;
free_tx_tfm:
crypto_free_hash(tcp_conn->tx_hash.tfm);
free_tcp_conn:
iscsi_conn_printk(KERN_ERR, conn,
"Could not create connection due to crc32c "
"loading error. Make sure the crc32c "
"module is built as a module or into the "
"kernel\n");
kfree(tcp_conn);
tcp_conn_alloc_fail:
iscsi_conn_teardown(cls_conn);
@ -1627,7 +1629,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
/* lookup for existing socket */
sock = sockfd_lookup((int)transport_eph, &err);
if (!sock) {
printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
iscsi_conn_printk(KERN_ERR, conn,
"sockfd_lookup failed %d\n", err);
return -EEXIST;
}
/*

View file

@ -160,7 +160,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
hdr->opcode = ISCSI_OP_SCSI_CMD;
hdr->flags = ISCSI_ATTR_SIMPLE;
int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
hdr->itt = build_itt(ctask->itt, conn->id, session->age);
hdr->itt = build_itt(ctask->itt, session->age);
hdr->data_length = cpu_to_be32(scsi_bufflen(sc));
hdr->cmdsn = cpu_to_be32(session->cmdsn);
session->cmdsn++;
@ -416,8 +416,9 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (datalen < 2) {
invalid_datalen:
printk(KERN_ERR "iscsi: Got CHECK_CONDITION but "
"invalid data buffer size of %d\n", datalen);
iscsi_conn_printk(KERN_ERR, conn,
"Got CHECK_CONDITION but invalid data "
"buffer size of %d\n", datalen);
sc->result = DID_BAD_TARGET << 16;
goto out;
}
@ -494,7 +495,7 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
if (!mtask) {
printk(KERN_ERR "Could not send nopout\n");
iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
return;
}
@ -522,9 +523,10 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
itt = get_itt(rejected_pdu.itt);
printk(KERN_ERR "itt 0x%x had pdu (op 0x%x) rejected "
"due to DataDigest error.\n", itt,
rejected_pdu.opcode);
iscsi_conn_printk(KERN_ERR, conn,
"itt 0x%x had pdu (op 0x%x) rejected "
"due to DataDigest error.\n", itt,
rejected_pdu.opcode);
}
}
return 0;
@ -541,8 +543,8 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
* queuecommand or send generic. session lock must be held and verify
* itt must have been called.
*/
int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
char *data, int datalen)
static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
char *data, int datalen)
{
struct iscsi_session *session = conn->session;
int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
@ -672,7 +674,6 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
return rc;
}
EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
char *data, int datalen)
@ -697,18 +698,13 @@ int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (hdr->itt != RESERVED_ITT) {
if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
(session->age << ISCSI_AGE_SHIFT)) {
printk(KERN_ERR "iscsi: received itt %x expected "
"session age (%x)\n", (__force u32)hdr->itt,
session->age & ISCSI_AGE_MASK);
iscsi_conn_printk(KERN_ERR, conn,
"received itt %x expected session "
"age (%x)\n", (__force u32)hdr->itt,
session->age & ISCSI_AGE_MASK);
return ISCSI_ERR_BAD_ITT;
}
if (((__force u32)hdr->itt & ISCSI_CID_MASK) !=
(conn->id << ISCSI_CID_SHIFT)) {
printk(KERN_ERR "iscsi: received itt %x, expected "
"CID (%x)\n", (__force u32)hdr->itt, conn->id);
return ISCSI_ERR_BAD_ITT;
}
itt = get_itt(hdr->itt);
} else
itt = ~0U;
@ -717,16 +713,17 @@ int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
ctask = session->cmds[itt];
if (!ctask->sc) {
printk(KERN_INFO "iscsi: dropping ctask with "
"itt 0x%x\n", ctask->itt);
iscsi_conn_printk(KERN_INFO, conn, "dropping ctask "
"with itt 0x%x\n", ctask->itt);
/* force drop */
return ISCSI_ERR_NO_SCSI_CMD;
}
if (ctask->sc->SCp.phase != session->age) {
printk(KERN_ERR "iscsi: ctask's session age %d, "
"expected %d\n", ctask->sc->SCp.phase,
session->age);
iscsi_conn_printk(KERN_ERR, conn,
"iscsi: ctask's session age %d, "
"expected %d\n", ctask->sc->SCp.phase,
session->age);
return ISCSI_ERR_SESSION_FAILED;
}
}
@ -771,7 +768,7 @@ static void iscsi_prep_mtask(struct iscsi_conn *conn,
*/
nop->cmdsn = cpu_to_be32(session->cmdsn);
if (hdr->itt != RESERVED_ITT) {
hdr->itt = build_itt(mtask->itt, conn->id, session->age);
hdr->itt = build_itt(mtask->itt, session->age);
/*
* TODO: We always use immediate, so we never hit this.
* If we start to send tmfs or nops as non-immediate then
@ -997,6 +994,7 @@ enum {
FAILURE_SESSION_IN_RECOVERY,
FAILURE_SESSION_RECOVERY_TIMEOUT,
FAILURE_SESSION_LOGGING_OUT,
FAILURE_SESSION_NOT_READY,
};
int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
@ -1017,6 +1015,12 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
session = iscsi_hostdata(host->hostdata);
spin_lock(&session->lock);
reason = iscsi_session_chkready(session_to_cls(session));
if (reason) {
sc->result = reason;
goto fault;
}
/*
* ISCSI_STATE_FAILED is a temp. state. The recovery
* code will decide what is best to do with command queued
@ -1033,18 +1037,23 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
switch (session->state) {
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
goto reject;
sc->result = DID_IMM_RETRY << 16;
break;
case ISCSI_STATE_LOGGING_OUT:
reason = FAILURE_SESSION_LOGGING_OUT;
goto reject;
sc->result = DID_IMM_RETRY << 16;
break;
case ISCSI_STATE_RECOVERY_FAILED:
reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
sc->result = DID_NO_CONNECT << 16;
break;
case ISCSI_STATE_TERMINATE:
reason = FAILURE_SESSION_TERMINATE;
sc->result = DID_NO_CONNECT << 16;
break;
default:
reason = FAILURE_SESSION_FREED;
sc->result = DID_NO_CONNECT << 16;
}
goto fault;
}
@ -1052,6 +1061,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
conn = session->leadconn;
if (!conn) {
reason = FAILURE_SESSION_FREED;
sc->result = DID_NO_CONNECT << 16;
goto fault;
}
@ -1091,9 +1101,7 @@ reject:
fault:
spin_unlock(&session->lock);
printk(KERN_ERR "iscsi: cmd 0x%x is not queued (%d)\n",
sc->cmnd[0], reason);
sc->result = (DID_NO_CONNECT << 16);
debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
scsi_set_resid(sc, scsi_bufflen(sc));
sc->scsi_done(sc);
spin_lock(host->host_lock);
@ -1160,7 +1168,8 @@ failed:
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
if (session->state == ISCSI_STATE_LOGGED_IN)
printk(KERN_INFO "iscsi: host reset succeeded\n");
iscsi_session_printk(KERN_INFO, session,
"host reset succeeded\n");
else
goto failed;
spin_unlock_bh(&session->lock);
@ -1239,7 +1248,8 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
* Fail commands. session lock held and recv side suspended and xmit
* thread flushed
*/
static void fail_all_commands(struct iscsi_conn *conn, unsigned lun)
static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
int error)
{
struct iscsi_cmd_task *ctask, *tmp;
@ -1251,7 +1261,7 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun)
if (lun == ctask->sc->device->lun || lun == -1) {
debug_scsi("failing pending sc %p itt 0x%x\n",
ctask->sc, ctask->itt);
fail_command(conn, ctask, DID_BUS_BUSY << 16);
fail_command(conn, ctask, error << 16);
}
}
@ -1259,7 +1269,7 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun)
if (lun == ctask->sc->device->lun || lun == -1) {
debug_scsi("failing requeued sc %p itt 0x%x\n",
ctask->sc, ctask->itt);
fail_command(conn, ctask, DID_BUS_BUSY << 16);
fail_command(conn, ctask, error << 16);
}
}
@ -1357,10 +1367,10 @@ static void iscsi_check_transport_timeouts(unsigned long data)
last_recv = conn->last_recv;
if (time_before_eq(last_recv + timeout + (conn->ping_timeout * HZ),
jiffies)) {
printk(KERN_ERR "ping timeout of %d secs expired, "
"last rx %lu, last ping %lu, now %lu\n",
conn->ping_timeout, last_recv,
conn->last_ping, jiffies);
iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
"expired, last rx %lu, last ping %lu, "
"now %lu\n", conn->ping_timeout, last_recv,
conn->last_ping, jiffies);
spin_unlock(&session->lock);
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
return;
@ -1373,14 +1383,11 @@ static void iscsi_check_transport_timeouts(unsigned long data)
iscsi_send_nopout(conn, NULL);
}
next_timeout = last_recv + timeout + (conn->ping_timeout * HZ);
} else {
} else
next_timeout = last_recv + timeout;
}
if (next_timeout) {
debug_scsi("Setting next tmo %lu\n", next_timeout);
mod_timer(&conn->transport_timer, next_timeout);
}
debug_scsi("Setting next tmo %lu\n", next_timeout);
mod_timer(&conn->transport_timer, next_timeout);
done:
spin_unlock(&session->lock);
}
@ -1573,7 +1580,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
/* need to grab the recv lock then session lock */
write_lock_bh(conn->recv_lock);
spin_lock(&session->lock);
fail_all_commands(conn, sc->device->lun);
fail_all_commands(conn, sc->device->lun, DID_ERROR);
conn->tmf_state = TMF_INITIAL;
spin_unlock(&session->lock);
write_unlock_bh(conn->recv_lock);
@ -1944,9 +1951,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
}
spin_unlock_irqrestore(session->host->host_lock, flags);
msleep_interruptible(500);
printk(KERN_INFO "iscsi: scsi conn_destroy(): host_busy %d "
"host_failed %d\n", session->host->host_busy,
session->host->host_failed);
iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
"host_busy %d host_failed %d\n",
session->host->host_busy,
session->host->host_failed);
/*
* force eh_abort() to unblock
*/
@ -1975,27 +1983,28 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
struct iscsi_session *session = conn->session;
if (!session) {
printk(KERN_ERR "iscsi: can't start unbound connection\n");
iscsi_conn_printk(KERN_ERR, conn,
"can't start unbound connection\n");
return -EPERM;
}
if ((session->imm_data_en || !session->initial_r2t_en) &&
session->first_burst > session->max_burst) {
printk("iscsi: invalid burst lengths: "
"first_burst %d max_burst %d\n",
session->first_burst, session->max_burst);
iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: "
"first_burst %d max_burst %d\n",
session->first_burst, session->max_burst);
return -EINVAL;
}
if (conn->ping_timeout && !conn->recv_timeout) {
printk(KERN_ERR "iscsi: invalid recv timeout of zero "
"Using 5 seconds\n.");
iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of "
"zero. Using 5 seconds\n.");
conn->recv_timeout = 5;
}
if (conn->recv_timeout && !conn->ping_timeout) {
printk(KERN_ERR "iscsi: invalid ping timeout of zero "
"Using 5 seconds.\n");
iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of "
"zero. Using 5 seconds.\n");
conn->ping_timeout = 5;
}
@ -2019,11 +2028,9 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
conn->stop_stage = 0;
conn->tmf_state = TMF_INITIAL;
session->age++;
spin_unlock_bh(&session->lock);
iscsi_unblock_session(session_to_cls(session));
wake_up(&conn->ehwait);
return 0;
if (session->age == 16)
session->age = 0;
break;
case STOP_CONN_TERM:
conn->stop_stage = 0;
break;
@ -2032,6 +2039,8 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
}
spin_unlock_bh(&session->lock);
iscsi_unblock_session(session_to_cls(session));
wake_up(&conn->ehwait);
return 0;
}
EXPORT_SYMBOL_GPL(iscsi_conn_start);
@ -2123,7 +2132,8 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
* flush queues.
*/
spin_lock_bh(&session->lock);
fail_all_commands(conn, -1);
fail_all_commands(conn, -1,
STOP_CONN_RECOVER ? DID_BUS_BUSY : DID_ERROR);
flush_control_queues(session, conn);
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
@ -2140,7 +2150,8 @@ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
iscsi_start_session_recovery(session, conn, flag);
break;
default:
printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag);
iscsi_conn_printk(KERN_ERR, conn,
"invalid stop flag %d\n", flag);
}
}
EXPORT_SYMBOL_GPL(iscsi_conn_stop);

View file

@ -1,751 +0,0 @@
/*
* 68k mac 53c9[46] scsi driver
*
* copyright (c) 1998, David Weis weisd3458@uni.edu
*
* debugging on Quadra 800 and 660AV Michael Schmitz, Dave Kilzer 7/98
*
* based loosely on cyber_esp.c
*/
/* these are unused for now */
#define myreadl(addr) (*(volatile unsigned int *) (addr))
#define mywritel(b, addr) ((*(volatile unsigned int *) (addr)) = (b))
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "NCR53C9x.h"
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/macints.h>
#include <asm/machw.h>
#include <asm/mac_via.h>
#include <asm/pgtable.h>
#include <asm/macintosh.h>
/* #define DEBUG_MAC_ESP */
extern void esp_handle(struct NCR_ESP *esp);
extern void mac_esp_intr(int irq, void *dev_id);
static int dma_bytes_sent(struct NCR_ESP * esp, int fifo_count);
static int dma_can_transfer(struct NCR_ESP * esp, Scsi_Cmnd *sp);
static void dma_dump_state(struct NCR_ESP * esp);
static void dma_init_read(struct NCR_ESP * esp, char * vaddress, int length);
static void dma_init_write(struct NCR_ESP * esp, char * vaddress, int length);
static void dma_ints_off(struct NCR_ESP * esp);
static void dma_ints_on(struct NCR_ESP * esp);
static int dma_irq_p(struct NCR_ESP * esp);
static int dma_irq_p_quick(struct NCR_ESP * esp);
static void dma_led_off(struct NCR_ESP * esp);
static void dma_led_on(struct NCR_ESP *esp);
static int dma_ports_p(struct NCR_ESP *esp);
static void dma_setup(struct NCR_ESP * esp, __u32 addr, int count, int write);
static void dma_setup_quick(struct NCR_ESP * esp, __u32 addr, int count, int write);
static int esp_dafb_dma_irq_p(struct NCR_ESP * espdev);
static int esp_iosb_dma_irq_p(struct NCR_ESP * espdev);
static volatile unsigned char cmd_buffer[16];
/* This is where all commands are put
* before they are transferred to the ESP chip
* via PIO.
*/
static int esp_initialized = 0;
static int setup_num_esps = -1;
static int setup_disconnect = -1;
static int setup_nosync = -1;
static int setup_can_queue = -1;
static int setup_cmd_per_lun = -1;
static int setup_sg_tablesize = -1;
#ifdef SUPPORT_TAGS
static int setup_use_tagged_queuing = -1;
#endif
static int setup_hostid = -1;
/*
* Experimental ESP inthandler; check macints.c to make sure dev_id is
* set up properly!
*/
/*
 * Mac ESP interrupt handler: verify the IRQ belongs to this ESP, then
 * dispatch into the generic esp_handle() core with DMA interrupts
 * temporarily masked.
 *
 * NOTE(review): the printk below fires unconditionally on every
 * interrupt — debug left-over, very noisy in production.
 */
void mac_esp_intr(int irq, void *dev_id)
{
	struct NCR_ESP *esp = (struct NCR_ESP *) dev_id;
	int irq_p = 0;

	/* Handle the one ESP interrupt showing at this IRQ level. */
	if(((esp)->irq & 0xff) == irq) {
	/*
	 * Debug ..
	 */
		irq_p = esp->dma_irq_p(esp);
		printk("mac_esp: irq_p %x current %p disconnected %p\n",
			irq_p, esp->current_SC, esp->disconnected_SC);

		/*
		 * Mac: if we're here, it's an ESP interrupt for sure!
		 */
		if((esp->current_SC || esp->disconnected_SC)) {
			esp->dma_ints_off(esp);

			ESPIRQ(("I%d(", esp->esp_id));
			esp_handle(esp);
			ESPIRQ((")"));

			esp->dma_ints_on(esp);
		}
	}
}
/*
* Debug hooks; use for playing with the interrupt flag testing and interrupt
* acknowledge on the various machines
*/
/* Polled-mode entry point: forward to the interrupt handler once init is done. */
void scsi_esp_polled(int irq, void *dev_id)
{
	if (esp_initialized == 0)
		return;

	mac_esp_intr(irq, dev_id);
}
/* Debug shim: optionally log, then forward to the real interrupt handler. */
void fake_intr(int irq, void *dev_id)
{
#ifdef DEBUG_MAC_ESP
	printk("mac_esp: got irq\n");
#endif

	mac_esp_intr(irq, dev_id);
}
/* Stub DRQ handler: just log the event and claim the interrupt. */
irqreturn_t fake_drq(int irq, void *dev_id)
{
	printk("mac_esp: got drq\n");
	return IRQ_HANDLED;
}
#define DRIVER_SETUP

/*
 * Function : mac_esp_setup(char *str)
 *
 * Purpose : booter command line initialization of the overrides array,
 *
 * Inputs : str - parameters, separated by commas.
 *
 * Currently unused in the new driver; need to add settable parameters to the
 * detect function.
 *
 */
static int __init mac_esp_setup(char *str) {
#ifdef DRIVER_SETUP
        /* Format of mac53c9x parameter is:
         * mac53c9x=<num_esps>,<disconnect>,<nosync>,<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>
         * Negative values mean don't change.
         */
        char *this_opt;
        long opt;

        /* <num_esps>: how many controllers to drive (0..2). */
        this_opt = strsep (&str, ",");
        if(this_opt) {
                opt = simple_strtol( this_opt, NULL, 0 );
                if (opt >= 0 && opt <= 2)
                        setup_num_esps = opt;
                else if (opt > 2)
                        printk( "mac_esp_setup: invalid number of hosts %ld !\n", opt );
                this_opt = strsep (&str, ",");
        }
        /* <disconnect>: stored only; see the TODO in mac_esp_detect(). */
        if(this_opt) {
                opt = simple_strtol( this_opt, NULL, 0 );
                if (opt > 0)
                        setup_disconnect = opt;
                this_opt = strsep (&str, ",");
        }
        /* <nosync>: stored only; see the TODO in mac_esp_detect(). */
        if(this_opt) {
                opt = simple_strtol( this_opt, NULL, 0 );
                if (opt >= 0)
                        setup_nosync = opt;
                this_opt = strsep (&str, ",");
        }
        /* <can_queue>: host queue depth override (default 7). */
        if(this_opt) {
                opt = simple_strtol( this_opt, NULL, 0 );
                if (opt > 0)
                        setup_can_queue = opt;
                this_opt = strsep (&str, ",");
        }
        /* <cmd_per_lun>: commands per LUN override (default 1). */
        if(this_opt) {
                opt = simple_strtol( this_opt, NULL, 0 );
                if (opt > 0)
                        setup_cmd_per_lun = opt;
                this_opt = strsep (&str, ",");
        }
        /* <sg_tablesize>: scatter-gather table size, clamped to SG_ALL. */
        if(this_opt) {
                opt = simple_strtol( this_opt, NULL, 0 );
                if (opt >= 0) {
                        setup_sg_tablesize = opt;
                        /* Must be <= SG_ALL (255) */
                        if (setup_sg_tablesize > SG_ALL)
                                setup_sg_tablesize = SG_ALL;
                }
                this_opt = strsep (&str, ",");
        }
        /* <hostid>: our initiator SCSI ID. */
        if(this_opt) {
                opt = simple_strtol( this_opt, NULL, 0 );
                /* Must be between 0 and 7 */
                if (opt >= 0 && opt <= 7)
                        setup_hostid = opt;
                else if (opt > 7)
                        printk( "mac_esp_setup: invalid host ID %ld !\n", opt);
                this_opt = strsep (&str, ",");
        }
#ifdef SUPPORT_TAGS
        /* <use_tags>: normalized to 0/1. */
        if(this_opt) {
                opt = simple_strtol( this_opt, NULL, 0 );
                if (opt >= 0)
                        setup_use_tagged_queuing = !!opt;
        }
#endif
#endif
        return 1;
}

__setup("mac53c9x=", mac_esp_setup);
/*
 * ESP address 'detection': derive the physical base of ESP @chip_num
 * from the detected Macintosh model.  Models with two chips map the
 * second one at a small offset from the first.
 */
unsigned long get_base(int chip_num)
{
        const unsigned long io_base = 0x50f00000;
        const unsigned int second_offset = 0x402;
        unsigned long addr;

        switch (macintosh_config->scsi_type) {
        case MAC_SCSI_QUADRA2:
                /* 950, 900, 700 */
                addr = io_base + 0xf000;
                if (chip_num != 0)
                        addr += second_offset;
                break;
        case MAC_SCSI_QUADRA3:
                /* av's */
                addr = io_base + 0x18000;
                if (chip_num != 0)
                        addr += second_offset;
                break;
        case MAC_SCSI_QUADRA:
                /* most quadra/centris models are like this */
                addr = io_base + 0x10000;
                break;
        default:
                printk("mac_esp: get_base: hit default!\n");
                addr = io_base + 0x10000;
                break;
        }

        printk("mac_esp: io base at 0x%lx\n", addr);

        return addr;
}
/*
 * Model dependent ESP setup
 *
 * Probes the machine for 53C96 chips, applies the boot-time overrides
 * collected by mac_esp_setup(), and registers one host per chip with
 * the ESP core.  Returns the number of controllers brought up, or
 * -ENODEV if detection already ran.
 */
int mac_esp_detect(struct scsi_host_template * tpnt)
{
        int quick = 0;
        int chipnum, chipspresent = 0;
#if 0
        unsigned long timeout;
#endif

        /* Probe only once. */
        if (esp_initialized > 0)
                return -ENODEV;

        /* what do we have in this machine... */
        if (MACHW_PRESENT(MAC_SCSI_96)) {
                chipspresent ++;
        }

        if (MACHW_PRESENT(MAC_SCSI_96_2)) {
                chipspresent ++;
        }

        /* number of ESPs present ? (the command line may ask for fewer) */
        if (setup_num_esps >= 0) {
                if (chipspresent >= setup_num_esps)
                        chipspresent = setup_num_esps;
                else
                        printk("mac_esp_detect: num_hosts detected %d setup %d \n",
                               chipspresent, setup_num_esps);
        }

        /* TODO: add disconnect / nosync flags */

        /* setup variables: boot-time overrides or defaults */
        tpnt->can_queue =
                (setup_can_queue > 0) ? setup_can_queue : 7;
        tpnt->cmd_per_lun =
                (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : 1;
        tpnt->sg_tablesize =
                (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_ALL;

        if (setup_hostid >= 0)
                tpnt->this_id = setup_hostid;
        else {
                /* use 7 as default */
                tpnt->this_id = 7;
        }

#ifdef SUPPORT_TAGS
        if (setup_use_tagged_queuing < 0)
                setup_use_tagged_queuing = DEFAULT_USE_TAGGED_QUEUING;
#endif

        for (chipnum = 0; chipnum < chipspresent; chipnum ++) {
                struct NCR_ESP * esp;

                esp = esp_allocate(tpnt, NULL, 0);
                esp->eregs = (struct ESP_regs *) get_base(chipnum);

                /* Default interrupt predicate; overridden per model below. */
                esp->dma_irq_p = &esp_dafb_dma_irq_p;
                if (chipnum == 0) {
                        if (macintosh_config->scsi_type == MAC_SCSI_QUADRA) {
                                /* most machines except those below :-) */
                                quick = 1;
                                esp->dma_irq_p = &esp_iosb_dma_irq_p;
                        } else if (macintosh_config->scsi_type == MAC_SCSI_QUADRA3) {
                                /* mostly av's */
                                quick = 0;
                        } else {
                                /* q950, 900, 700 */
                                quick = 1;
                                /* NOTE(review): out_be32() is handed a bare
                                 * constant address here -- presumably a fixed
                                 * register mapping on these models; confirm. */
                                out_be32(0xf9800024, 0x1d1);
                                esp->dregs = (void *) 0xf9800024;
                        }
                } else { /* chipnum */
                        quick = 1;
                        out_be32(0xf9800028, 0x1d1);
                        esp->dregs = (void *) 0xf9800028;
                } /* chipnum == 0 */

                /* use pio for command bytes; pio for message/data: TBI */
                esp->do_pio_cmds = 1;

                /* Set the command buffer */
                esp->esp_command = (volatile unsigned char*) cmd_buffer;
                esp->esp_command_dvma = (__u32) cmd_buffer;

                /* various functions */
                esp->dma_bytes_sent = &dma_bytes_sent;
                esp->dma_can_transfer = &dma_can_transfer;
                esp->dma_dump_state = &dma_dump_state;
                esp->dma_init_read = NULL;
                esp->dma_init_write = NULL;
                esp->dma_ints_off = &dma_ints_off;
                esp->dma_ints_on = &dma_ints_on;
                esp->dma_ports_p = &dma_ports_p;

                /* Optional functions */
                esp->dma_barrier = NULL;
                esp->dma_drain = NULL;
                esp->dma_invalidate = NULL;
                esp->dma_irq_entry = NULL;
                esp->dma_irq_exit = NULL;
                esp->dma_led_on = NULL;
                esp->dma_led_off = NULL;
                esp->dma_poll = NULL;
                esp->dma_reset = NULL;

                /* SCSI chip speed */
                /* below esp->cfreq = 40000000; */

                if (quick) {
                        /* 'quick' means there's handshake glue logic like in the 5380 case */
                        esp->dma_setup = &dma_setup_quick;
                } else {
                        esp->dma_setup = &dma_setup;
                }

                if (chipnum == 0) {
                        esp->irq = IRQ_MAC_SCSI;
                        /* NOTE(review): request_irq() return value ignored. */
                        request_irq(IRQ_MAC_SCSI, esp_intr, 0, "Mac ESP SCSI", esp->ehost);
#if 0   /* conflicts with IOP ADB */
                        request_irq(IRQ_MAC_SCSIDRQ, fake_drq, 0, "Mac ESP DRQ", esp->ehost);
#endif
                        if (macintosh_config->scsi_type == MAC_SCSI_QUADRA) {
                                esp->cfreq = 16500000;
                        } else {
                                esp->cfreq = 25000000;
                        }
                } else { /* chipnum == 1 */
                        esp->irq = IRQ_MAC_SCSIDRQ;
#if 0   /* conflicts with IOP ADB */
                        request_irq(IRQ_MAC_SCSIDRQ, esp_intr, 0, "Mac ESP SCSI 2", esp->ehost);
#endif
                        esp->cfreq = 25000000;
                }

                if (quick) {
                        printk("esp: using quick version\n");
                }

                printk("esp: addr at 0x%p\n", esp->eregs);

                esp->scsi_id = 7;
                esp->diff = 0;

                esp_initialize(esp);
        } /* for chipnum */

        if (chipspresent)
                printk("\nmac_esp: %d esp controllers found\n", chipspresent);

        esp_initialized = chipspresent;

        return chipspresent;
}
/*
 * Host template ->release: free the IRQ and any claimed I/O region,
 * then unregister the host.  Always returns 0.
 *
 * NOTE(review): free_irq() is called with a NULL dev_id, but the IRQ
 * was requested in mac_esp_detect() with esp->ehost as dev_id — confirm
 * the dev_id pairing.
 */
static int mac_esp_release(struct Scsi_Host *shost)
{
        if (shost->irq)
                free_irq(shost->irq, NULL);
        if (shost->io_port && shost->n_io_port)
                release_region(shost->io_port, shost->n_io_port);
        scsi_unregister(shost);
        return 0;
}
/*
* I've been wondering what this is supposed to do, for some time. Talking
* to Allen Briggs: These machines have an extra register someplace where the
* DRQ pin of the ESP can be monitored. That isn't useful for determining
* anything else (such as reselect interrupt or other magic) though.
* Maybe the semantics should be changed like
* if (esp->current_SC)
* ... check DRQ flag ...
* else
* ... disconnected, check pending VIA interrupt ...
*
* There's a problem with using the dabf flag or mac_irq_pending() here: both
* seem to return 1 even though no interrupt is currently pending, resulting
* in esp_exec_cmd() holding off the next command, and possibly infinite loops
* in esp_intr().
* Short term fix: just use esp_status & ESP_STAT_INTR here, as long as we
* use simple PIO. The DRQ status will be important when implementing pseudo
* DMA mode (set up ESP transfer count, return, do a batch of bytes in PIO or
* 'hardware handshake' mode upon DRQ).
* If you plan on changing this (i.e. to save the esp_status register access in
* favor of a VIA register access or a shadow register for the IFR), make sure
* to try a debug version of this first to monitor what registers would be a good
* indicator of the ESP interrupt.
*/
/*
 * DAFB-machine DMA interrupt predicate: returns 1 when the ESP chip's
 * interrupt flag (ESP_STAT_INTR in the status register) is raised,
 * 0 otherwise.
 *
 * Only the ESP status register is consulted; the DAFB DRQ register and
 * mac_irq_pending() were tried historically but read as pending even
 * with no interrupt outstanding (see the long comment above), which
 * stalled esp_exec_cmd() and could loop esp_intr() forever.
 *
 * Cleanup: the original declared an 'unsigned int ret' that was only
 * referenced from dead '#if 0' code placed after an unconditional
 * return — both the unused variable and the unreachable code are gone.
 */
static int esp_dafb_dma_irq_p(struct NCR_ESP * esp)
{
        int sreg = esp_read(esp->eregs->esp_status);

#ifdef DEBUG_MAC_ESP
        printk("mac_esp: esp_dafb_dma_irq_p dafb %d irq %d\n",
               readl(esp->dregs), mac_irq_pending(IRQ_MAC_SCSI));
#endif
        /* Non-zero iff the chip's interrupt bit is set. */
        return (sreg & ESP_STAT_INTR) ? 1 : 0;
}
/*
 * IOSB-machine (Quadra/Centris) DMA interrupt predicate: returns the
 * masked ESP status (non-zero == interrupt pending).
 *
 * See above: testing mac_irq_pending() always returned 8 (SCSI IRQ)
 * regardless of the actual ESP status, so only ESP_STAT_INTR is used.
 *
 * Cleanup: the original also computed mac_irq_pending(...) ||
 * mac_irq_pending(...) into a local 'ret' that was never read — dead
 * work and an unused-variable warning, removed here.
 */
static int esp_iosb_dma_irq_p(struct NCR_ESP * esp)
{
        int sreg = esp_read(esp->eregs->esp_status);

#ifdef DEBUG_MAC_ESP
        printk("mac_esp: dma_irq_p drq %d irq %d sreg %x curr %p disc %p\n",
               mac_irq_pending(IRQ_MAC_SCSIDRQ), mac_irq_pending(IRQ_MAC_SCSI),
               sreg, esp->current_SC, esp->disconnected_SC);
#endif
        /* Same value the original returned: sreg masked to the IRQ bit. */
        return sreg & ESP_STAT_INTR;
}
/*
 * Residual accounting after a transfer.  With plain PIO the FIFO count
 * handed in by the core is already correct ("usually 0 after PIO"), so
 * it is simply echoed back.
 */
static int dma_bytes_sent(struct NCR_ESP * esp, int fifo_count)
{
        int sent = fifo_count;

#ifdef DEBUG_MAC_ESP
        printk("mac_esp: dma bytes sent = %x\n", sent);
#endif
        return sent;
}
/*
 * dma_can_transfer is used to switch between DMA and PIO, if DMA (pseudo)
 * is ever implemented. Returning 0 here will use PIO.
 */
static int dma_can_transfer(struct NCR_ESP * esp, Scsi_Cmnd * sp)
{
        unsigned long sz = sp->SCp.this_residual;
#if 0   /* no DMA yet; make conditional */
        if (sz > 0x10000000) {
                sz = 0x10000000;
        }
        printk("mac_esp: dma can transfer = 0lx%x\n", sz);
#else

#ifdef DEBUG_MAC_ESP
        printk("mac_esp: pio to transfer = %ld\n", sz);
#endif

        /* Force PIO: no (pseudo-)DMA is implemented for this board yet. */
        sz = 0;
#endif
        return sz;
}
/*
 * Not yet ...
 */
/* Debug stub: no DMA engine state to dump; logs only when debugging. */
static void dma_dump_state(struct NCR_ESP * esp)
{
#ifdef DEBUG_MAC_ESP
        printk("mac_esp: dma_dump_state: called\n");
#endif
#if 0
        ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
                esp->esp_id, ((struct mac_dma_registers *)
                (esp->dregs))->cond_reg));
#endif
}
/*
 * DMA setup: should be used to set up the ESP transfer count for pseudo
 * DMA transfers; need a DRQ transfer function to do the actual transfer
 */
/* Stub: pseudo-DMA read setup not implemented; only logs the call. */
static void dma_init_read(struct NCR_ESP * esp, char * vaddress, int length)
{
        printk("mac_esp: dma_init_read\n");
}
/* Stub: pseudo-DMA write setup not implemented; only logs the call. */
static void dma_init_write(struct NCR_ESP * esp, char * vaddress, int length)
{
        printk("mac_esp: dma_init_write\n");
}
/* Mask this ESP's interrupt line (used around esp_handle() in the ISR). */
static void dma_ints_off(struct NCR_ESP * esp)
{
        disable_irq(esp->irq);
}
/* Re-enable this ESP's interrupt line. */
static void dma_ints_on(struct NCR_ESP * esp)
{
        enable_irq(esp->irq);
}
/*
 * Generic dma_irq_p(), unused by the hookups above: non-zero when the
 * ESP status register's interrupt bit is set.
 */
static int dma_irq_p(struct NCR_ESP * esp)
{
        int status = esp_read(esp->eregs->esp_status);

#ifdef DEBUG_MAC_ESP
        printk("mac_esp: dma_irq_p status %d\n", status);
#endif
        return status & ESP_STAT_INTR;
}
/*
 * Interrupt predicate for 'quick' (handshake-glue) boards — same logic
 * as esp_iosb_dma_irq_p(): only the ESP status register's interrupt
 * bit is trusted, since mac_irq_pending() misreports (see above).
 *
 * Cleanup: the original computed mac_irq_pending(...) || ... into a
 * local 'ret' that was never read — dead work and an unused-variable
 * warning, removed here.
 */
static int dma_irq_p_quick(struct NCR_ESP * esp)
{
        int sreg = esp_read(esp->eregs->esp_status);

#ifdef DEBUG_MAC_ESP
        printk("mac_esp: dma_irq_p drq %d irq %d sreg %x curr %p disc %p\n",
               mac_irq_pending(IRQ_MAC_SCSIDRQ), mac_irq_pending(IRQ_MAC_SCSI),
               sreg, esp->current_SC, esp->disconnected_SC);
#endif
        /* Same value the original returned: sreg masked to the IRQ bit. */
        return sreg & ESP_STAT_INTR;
}
/* Optional LED hook stub: logs only when debugging, does nothing else. */
static void dma_led_off(struct NCR_ESP * esp)
{
#ifdef DEBUG_MAC_ESP
        printk("mac_esp: dma_led_off: called\n");
#endif
}
/* Optional LED hook stub: logs only when debugging, does nothing else. */
static void dma_led_on(struct NCR_ESP * esp)
{
#ifdef DEBUG_MAC_ESP
        printk("mac_esp: dma_led_on: called\n");
#endif
}
/* PIO only: report that no DMA ports ever need servicing. */
static int dma_ports_p(struct NCR_ESP * esp)
{
        return 0;
}
/*
 * Direction dispatch to the (stub) pseudo-DMA setup helpers.
 *
 * NOTE(review): 'write' selects dma_init_read() here, i.e. 'write'
 * appears to mean "write to host memory" (data-in).  The MCA variant
 * in this tree maps the flag the opposite way — confirm the core's
 * convention before reusing this code.
 */
static void dma_setup(struct NCR_ESP * esp, __u32 addr, int count, int write)
{
#ifdef DEBUG_MAC_ESP
        printk("mac_esp: dma_setup\n");
#endif
        if (write) {
                dma_init_read(esp, (char *) addr, count);
        } else {
                dma_init_write(esp, (char *) addr, count);
        }
}
/* 'quick' boards: the 5380-style handshake glue needs no setup here. */
static void dma_setup_quick(struct NCR_ESP * esp, __u32 addr, int count, int write)
{
#ifdef DEBUG_MAC_ESP
        printk("mac_esp: dma_setup_quick\n");
#endif
}
/* SCSI host template for the Mac ESP; registered via scsi_module.c below. */
static struct scsi_host_template driver_template = {
        .proc_name              = "mac_esp",
        .name                   = "Mac 53C9x SCSI",
        .detect                 = mac_esp_detect,
        .slave_alloc            = esp_slave_alloc,
        .slave_destroy          = esp_slave_destroy,
        .release                = mac_esp_release,
        .info                   = esp_info,
        .queuecommand           = esp_queue,
        .eh_abort_handler       = esp_abort,
        .eh_bus_reset_handler   = esp_reset,
        .can_queue              = 7,
        .this_id                = 7,
        .sg_tablesize           = SG_ALL,
        .cmd_per_lun            = 1,
        .use_clustering         = DISABLE_CLUSTERING
};

#include "scsi_module.c"

MODULE_LICENSE("GPL");

View file

@ -1,520 +0,0 @@
/* mca_53c9x.c: Driver for the SCSI adapter found on NCR 35xx
* (and maybe some other) Microchannel machines
*
* Code taken mostly from Cyberstorm SCSI drivers
* Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
*
* Hacked to work with the NCR MCA stuff by Tymm Twillman (tymm@computer.org)
*
* The CyberStorm SCSI driver (and this driver) is based on David S. Miller's
* ESP driver * for the Sparc computers.
*
* Special thanks to Ken Stewart at Symbios (LSI) for helping with info on
* the 86C01. I was on the brink of going ga-ga...
*
* Also thanks to Jesper Skov for helping me with info on how the Amiga
* does things...
*/
/*
* This is currently only set up to use one 53c9x card at a time; it could be
* changed fairly easily to detect/use more than one, but I'm not too sure how
* many cards that use the 53c9x on MCA systems there are (if, in fact, there
* are cards that use them, other than the one built into some NCR systems)...
* If anyone requests this, I'll throw it in, otherwise it's not worth the
* effort.
*/
/*
* Info on the 86C01 MCA interface chip at the bottom, if you care enough to
* look.
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mca.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/mca-legacy.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "NCR53C9x.h"
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mca_dma.h>
#include <asm/pgtable.h>
/*
* From ibmmca.c (IBM scsi controller card driver) -- used for turning PS2 disk
* activity LED on and off
*/
#define PS2_SYS_CTR 0x92
/* Ports the ncr's 53c94 can be put at; indexed by pos register value */
#define MCA_53C9X_IO_PORTS { \
0x0000, 0x0240, 0x0340, 0x0400, \
0x0420, 0x3240, 0x8240, 0xA240, \
}
/*
* Supposedly there were some cards put together with the 'c9x and 86c01. If
* they have different ID's from the ones on the 3500 series machines,
* you can add them here and hopefully things will work out.
*/
#define MCA_53C9X_IDS { \
0x7F4C, \
0x0000, \
}
static int dma_bytes_sent(struct NCR_ESP *, int);
static int dma_can_transfer(struct NCR_ESP *, Scsi_Cmnd *);
static void dma_dump_state(struct NCR_ESP *);
static void dma_init_read(struct NCR_ESP *, __u32, int);
static void dma_init_write(struct NCR_ESP *, __u32, int);
static void dma_ints_off(struct NCR_ESP *);
static void dma_ints_on(struct NCR_ESP *);
static int dma_irq_p(struct NCR_ESP *);
static int dma_ports_p(struct NCR_ESP *);
static void dma_setup(struct NCR_ESP *, __u32, int, int);
static void dma_led_on(struct NCR_ESP *);
static void dma_led_off(struct NCR_ESP *);
/* This is where all commands are put before they are transferred to the
 * 53c9x via PIO.
 */
static volatile unsigned char cmd_buffer[16];

/*
 * We keep the structure that is used to access the registers on the 53c9x
 * here.
 */
static struct ESP_regs eregs;
/***************************************************************** Detection */
/*
 * Probe the MCA bus for a 53c9x/86C01 adapter, decode its POS registers
 * (I/O base, IRQ, DMA channel, SCSI ID), claim the resources and hand
 * the controller to the ESP core.  Returns the number of ESPs in use.
 */
static int mca_esp_detect(struct scsi_host_template *tpnt)
{
        struct NCR_ESP *esp;
        static int io_port_by_pos[] = MCA_53C9X_IO_PORTS;
        int mca_53c9x_ids[] = MCA_53C9X_IDS;
        int *id_to_check = mca_53c9x_ids;
        int slot;
        int pos[3];
        unsigned int tmp_io_addr;
        unsigned char tmp_byte;

        /* Nothing to do on machines without a Microchannel bus. */
        if (!MCA_bus)
                return 0;

        while (*id_to_check) {
                if ((slot = mca_find_adapter(*id_to_check, 0)) !=
                    MCA_NOTFOUND)
                {
                        esp = esp_allocate(tpnt, NULL, 0);

                        pos[0] = mca_read_stored_pos(slot, 2);
                        pos[1] = mca_read_stored_pos(slot, 3);
                        pos[2] = mca_read_stored_pos(slot, 4);

                        esp->eregs = &eregs;

                        /*
                         * IO port base is given in the first (non-ID) pos
                         * register, like so:
                         *
                         *  Bits 3  2  1       IO base
                         * ----------------------------
                         *       0  0  0       <disabled>
                         *       0  0  1       0x0240
                         *       0  1  0       0x0340
                         *       0  1  1       0x0400
                         *       1  0  0       0x0420
                         *       1  0  1       0x3240
                         *       1  1  0       0x8240
                         *       1  1  1       0xA240
                         */
                        tmp_io_addr =
                                io_port_by_pos[(pos[0] & 0x0E) >> 1];

                        /* The 53c94 registers sit 0x10 above the 86C01 base. */
                        esp->eregs->io_addr = tmp_io_addr + 0x10;

                        if (esp->eregs->io_addr == 0x0000) {
                                printk("Adapter is disabled.\n");
                                break;
                        }

                        /*
                         * IRQ is specified in bits 4 and 5:
                         *
                         *  Bits  4  5        IRQ
                         * -----------------------
                         *        0  0         3
                         *        0  1         5
                         *        1  0         7
                         *        1  1         9
                         */
                        esp->irq = ((pos[0] & 0x30) >> 3) + 3;

                        /*
                         * DMA channel is in the low 3 bits of the second
                         * POS register
                         */
                        esp->dma = pos[1] & 7;
                        esp->slot = slot;

                        if (request_irq(esp->irq, esp_intr, 0,
                                        "NCR 53c9x SCSI", esp->ehost))
                        {
                                printk("Unable to request IRQ %d.\n", esp->irq);
                                esp_deallocate(esp);
                                scsi_unregister(esp->ehost);
                                return 0;
                        }

                        if (request_dma(esp->dma, "NCR 53c9x SCSI")) {
                                printk("Unable to request DMA channel %d.\n",
                                       esp->dma);
                                /* NOTE(review): dev_id here (esp_intr) does
                                 * not match the esp->ehost used when the IRQ
                                 * was requested above — confirm. */
                                free_irq(esp->irq, esp_intr);
                                esp_deallocate(esp);
                                scsi_unregister(esp->ehost);
                                return 0;
                        }

                        /* NOTE(review): request_region() result unchecked. */
                        request_region(tmp_io_addr, 32, "NCR 53c9x SCSI");

                        /*
                         * 86C01 handles DMA, IO mode, from address
                         * (base + 0x0a)
                         */
                        mca_disable_dma(esp->dma);
                        mca_set_dma_io(esp->dma, tmp_io_addr + 0x0a);
                        mca_enable_dma(esp->dma);

                        /* Tell the 86C01 to give us interrupts */
                        tmp_byte = inb(tmp_io_addr + 0x02) | 0x40;
                        outb(tmp_byte, tmp_io_addr + 0x02);

                        /*
                         * Scsi ID -- general purpose register, hi
                         * 2 bits; add 4 to this number to get the
                         * ID
                         */
                        esp->scsi_id = ((pos[2] & 0xC0) >> 6) + 4;

                        /* Do command transfer with programmed I/O */
                        esp->do_pio_cmds = 1;

                        /* Required functions */
                        esp->dma_bytes_sent = &dma_bytes_sent;
                        esp->dma_can_transfer = &dma_can_transfer;
                        esp->dma_dump_state = &dma_dump_state;
                        esp->dma_init_read = &dma_init_read;
                        esp->dma_init_write = &dma_init_write;
                        esp->dma_ints_off = &dma_ints_off;
                        esp->dma_ints_on = &dma_ints_on;
                        esp->dma_irq_p = &dma_irq_p;
                        esp->dma_ports_p = &dma_ports_p;
                        esp->dma_setup = &dma_setup;

                        /* Optional functions */
                        esp->dma_barrier = NULL;
                        esp->dma_drain = NULL;
                        esp->dma_invalidate = NULL;
                        esp->dma_irq_entry = NULL;
                        esp->dma_irq_exit = NULL;
                        esp->dma_led_on = dma_led_on;
                        esp->dma_led_off = dma_led_off;
                        esp->dma_poll = NULL;
                        esp->dma_reset = NULL;

                        /* Set the command buffer */
                        esp->esp_command = (volatile unsigned char*)
                                           cmd_buffer;
                        esp->esp_command_dvma = isa_virt_to_bus(cmd_buffer);

                        /* SCSI chip speed */
                        esp->cfreq = 25000000;

                        /* Differential SCSI? I think not. */
                        esp->diff = 0;

                        esp_initialize(esp);

                        printk(" Adapter found in slot %2d: io port 0x%x "
                               "irq %d dma channel %d\n", slot + 1, tmp_io_addr,
                               esp->irq, esp->dma);

                        mca_set_adapter_name(slot, "NCR 53C9X SCSI Adapter");
                        mca_mark_as_used(slot);

                        break;
                }
                id_to_check++;
        }

        return esps_in_use;
}
/******************************************************************* Release */
/*
 * Release an MCA 53c9x host: quiesce the 86C01 interface chip, hand
 * back the IRQ/DMA resources and the MCA slot, then free the driver
 * state.
 *
 * Fixes relative to the original: esp_deallocate() used to run FIRST,
 * after which esp->eregs/esp->irq/esp->dma were still dereferenced
 * (use-after-free ordering); it now runs last.  free_irq() is also
 * given the same dev_id (esp->ehost) that was passed to request_irq()
 * in mca_esp_detect(), instead of the unrelated esp_intr pointer.
 */
static int mca_esp_release(struct Scsi_Host *host)
{
        struct NCR_ESP *esp = (struct NCR_ESP *)host->hostdata;
        unsigned char tmp_byte;

        /*
         * Tell the 86C01 to stop sending interrupts: clear bit 6 of the
         * mode enable register at (chip base - 0x0E).
         */
        tmp_byte = inb(esp->eregs->io_addr - 0x0E);
        tmp_byte &= ~0x40;
        outb(tmp_byte, esp->eregs->io_addr - 0x0E);

        free_irq(esp->irq, esp->ehost);
        free_dma(esp->dma);
        mca_mark_as_unused(esp->slot);

        /* Free the driver-private state only after the hardware is quiet. */
        esp_deallocate(esp);

        return 0;
}
/************************************************************* DMA Functions */
/*
 * Residual accounting: the 53c9x tracks its own transfer count, so the
 * fifo_count handed in by the core is authoritative — just echo it.
 */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
{
        /* Ask the 53c9x. It knows. */
        return fifo_count;
}
/*
 * Cap a single transfer at 128K bytes: that is the most the MCA DMA
 * channels can move at a time in 16-bit mode.
 */
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
        unsigned long residual = sp->SCp.this_residual;

        return (residual > 0x20000) ? 0x20000 : residual;
}
/* Best-effort debug dump: log the channel number and the DMA residue. */
static void dma_dump_state(struct NCR_ESP *esp)
{
        /*
         * Doesn't quite match up to the other drivers, but we do what we
         * can.
         */
        ESPLOG(("esp%d: dma channel <%d>\n", esp->esp_id, esp->dma));
        ESPLOG(("bytes left to dma: %d\n", mca_get_dma_residue(esp->dma)));
}
/*
 * Program the MCA DMA channel for a device-to-memory transfer of
 * @length bytes starting at bus address @addr.  Interrupts are blocked
 * for the whole sequence so the channel is never observed half
 * configured.
 */
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
{
        unsigned long flags;

        save_flags(flags);
        cli();

        mca_disable_dma(esp->dma);
        mca_set_dma_mode(esp->dma, MCA_DMA_MODE_XFER | MCA_DMA_MODE_16 |
                         MCA_DMA_MODE_IO);
        mca_set_dma_addr(esp->dma, addr);
        mca_set_dma_count(esp->dma, length / 2); /* !!! count is in 16-bit words */
        mca_enable_dma(esp->dma);

        restore_flags(flags);
}
/*
 * Program the MCA DMA channel for a memory-to-device transfer of
 * @length bytes starting at bus address @addr; same interrupt blocking
 * as dma_init_read(), with MCA_DMA_MODE_WRITE added.
 */
static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
{
        unsigned long flags;

        save_flags(flags);
        cli();

        mca_disable_dma(esp->dma);
        mca_set_dma_mode(esp->dma, MCA_DMA_MODE_XFER | MCA_DMA_MODE_WRITE |
                         MCA_DMA_MODE_16 | MCA_DMA_MODE_IO);
        mca_set_dma_addr(esp->dma, addr);
        mca_set_dma_count(esp->dma, length / 2); /* !!! count is in 16-bit words */
        mca_enable_dma(esp->dma);

        restore_flags(flags);
}
/*
 * Tell the 'C01 to shut up: all interrupts are routed through the
 * 86C01, so clearing its IRQ-enable bit (bit 6 of the mode register at
 * chip base - 0x0E) silences the adapter.
 */
static void dma_ints_off(struct NCR_ESP *esp)
{
        unsigned int mode_reg = esp->eregs->io_addr - 0x0E;

        outb(inb(mode_reg) & ~0x40, mode_reg);
}
/*
 * Ok, you can speak again: set the 86C01 IRQ-enable bit (bit 6 of the
 * mode register at chip base - 0x0E) to re-enable interrupts.
 */
static void dma_ints_on(struct NCR_ESP *esp)
{
        unsigned int mode_reg = esp->eregs->io_addr - 0x0E;

        outb(inb(mode_reg) | 0x40, mode_reg);
}
/*
 * Interrupt predicate: "yes" when the SCSI chip is raising an
 * interrupt and no DMA activity is taking place (Amiga-driver
 * semantics — a DMA error can't be distinguished here; see DaveM's
 * note in the original).
 *
 * The 86C01 status register lives at (chip base - 0x04): bit 0 is IRQ
 * pending (0 = pending), bit 1 is DMA pending (1 = pending).  The
 * original read that live hardware register twice, once per bit, so
 * the two tests could see inconsistent snapshots; read it once and
 * test both bits on the same value.
 */
static int dma_irq_p(struct NCR_ESP *esp)
{
        unsigned char status = inb(esp->eregs->io_addr - 0x04);

        /* Both bits clear == IRQ pending and no DMA in flight. */
        return (status & 0x03) == 0;
}
/*
 * Check whether interrupts are enabled on the 'C01 (in case abort is
 * entered multiple times, so we only do the abort once).
 */
static int dma_ports_p(struct NCR_ESP *esp)
{
        return (inb(esp->eregs->io_addr - 0x0E) & 0x40) != 0;
}
/*
 * Direction dispatch for the core.  Here 'write' selects the
 * memory-to-device path — the opposite mapping from the Mac variant in
 * this tree; NOTE(review): confirm the NCR53C9x core's convention.
 */
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
{
        if(write){
                dma_init_write(esp, addr, count);
        } else {
                dma_init_read(esp, addr, count);
        }
}
/*
 * These will not play nicely with other disk controllers that try to use the
 * disk active LED... but what can you do? Don't answer that.
 *
 * Stolen shamelessly from ibmmca.c -- IBM Microchannel SCSI adapter driver
 *
 */
/* Light the PS/2 disk-activity LED (top two bits of the system control port). */
static void dma_led_on(struct NCR_ESP *esp)
{
        outb(inb(PS2_SYS_CTR) | 0xc0, PS2_SYS_CTR);
}
/* Turn the PS/2 disk-activity LED back off. */
static void dma_led_off(struct NCR_ESP *esp)
{
        outb(inb(PS2_SYS_CTR) & 0x3f, PS2_SYS_CTR);
}
/* SCSI host template for the MCA 53c9x; registered via scsi_module.c below. */
static struct scsi_host_template driver_template = {
        .proc_name              = "mca_53c9x",
        .name                   = "NCR 53c9x SCSI",
        .detect                 = mca_esp_detect,
        .slave_alloc            = esp_slave_alloc,
        .slave_destroy          = esp_slave_destroy,
        .release                = mca_esp_release,
        .queuecommand           = esp_queue,
        .eh_abort_handler       = esp_abort,
        .eh_bus_reset_handler   = esp_reset,
        .can_queue              = 7,
        .sg_tablesize           = SG_ALL,
        .cmd_per_lun            = 1,
        .unchecked_isa_dma      = 1,
        .use_clustering         = DISABLE_CLUSTERING
};

#include "scsi_module.c"
/*
* OK, here's the goods I promised. The NCR 86C01 is an MCA interface chip
* that handles enabling/disabling IRQ, dma interfacing, IO port selection
* and other fun stuff. It takes up 16 addresses, and the chip it is
* connected to gets the following 16. Registers are as follows:
*
* Offsets 0-1 : Card ID
*
* Offset 2 : Mode enable register --
* Bit 7 : Data Word width (1 = 16, 0 = 8)
* Bit 6 : IRQ enable (1 = enabled)
* Bits 5,4 : IRQ select
* 0 0 : IRQ 3
* 0 1 : IRQ 5
* 1 0 : IRQ 7
* 1 1 : IRQ 9
* Bits 3-1 : Base Address
* 0 0 0 : <disabled>
* 0 0 1 : 0x0240
* 0 1 0 : 0x0340
* 0 1 1 : 0x0400
* 1 0 0 : 0x0420
* 1 0 1 : 0x3240
* 1 1 0 : 0x8240
* 1 1 1 : 0xA240
* Bit 0 : Card enable (1 = enabled)
*
* Offset 3 : DMA control register --
* Bit 7 : DMA enable (1 = enabled)
* Bits 6,5 : Preempt Count Select (transfers to complete after
* 'C01 has been preempted on MCA bus)
* 0 0 : 0
* 0 1 : 1
* 1 0 : 3
* 1 1 : 7
* (all these wacky numbers; I'm sure there's a reason somewhere)
* Bit 4 : Fairness enable (1 = fair bus priority)
* Bits 3-0 : Arbitration level (0-15 consecutive)
*
* Offset 4 : General purpose register
* Bits 7-3 : User definable (here, 7,6 are SCSI ID)
* Bits 2-0 : reserved
*
* Offset 10 : DMA decode register (used for IO based DMA; also can do
* PIO through this port)
*
* Offset 12 : Status
* Bits 7-2 : reserved
* Bit 1 : DMA pending (1 = pending)
* Bit 0 : IRQ pending (0 = pending)
*
* Exciting, huh?
*
*/

View file

@ -1,606 +0,0 @@
/*
* Oktagon_esp.c -- Driver for bsc Oktagon
*
* Written by Carsten Pluntke 1998
*
* Based on cyber_esp.c
*/
#if defined(CONFIG_AMIGA) || defined(CONFIG_APUS)
#define USE_BOTTOM_HALF
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/reboot.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "NCR53C9x.h"
#include <linux/zorro.h>
#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#ifdef USE_BOTTOM_HALF
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#endif
/* The controller registers can be found in the Z2 config area at these
* offsets:
*/
#define OKTAGON_ESP_ADDR 0x03000
#define OKTAGON_DMA_ADDR 0x01000
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length);
static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length);
static void dma_ints_off(struct NCR_ESP *esp);
static void dma_ints_on(struct NCR_ESP *esp);
static int dma_irq_p(struct NCR_ESP *esp);
static void dma_led_off(struct NCR_ESP *esp);
static void dma_led_on(struct NCR_ESP *esp);
static int dma_ports_p(struct NCR_ESP *esp);
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
static void dma_irq_exit(struct NCR_ESP *esp);
static void dma_invalidate(struct NCR_ESP *esp);
static void dma_mmu_get_scsi_one(struct NCR_ESP *,Scsi_Cmnd *);
static void dma_mmu_get_scsi_sgl(struct NCR_ESP *,Scsi_Cmnd *);
static void dma_mmu_release_scsi_one(struct NCR_ESP *,Scsi_Cmnd *);
static void dma_mmu_release_scsi_sgl(struct NCR_ESP *,Scsi_Cmnd *);
static void dma_advance_sg(Scsi_Cmnd *);
static int oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x);
#ifdef USE_BOTTOM_HALF
static void dma_commit(struct work_struct *unused);
long oktag_to_io(long *paddr, long *addr, long len);
long oktag_from_io(long *addr, long *paddr, long len);
static DECLARE_WORK(tq_fake_dma, dma_commit);
#define DMA_MAXTRANSFER 0x8000
#else
/*
* No bottom half. Use transfer directly from IRQ. Find a narrow path
* between too much IRQ overhead and clogging the IRQ for too long.
*/
#define DMA_MAXTRANSFER 0x1000
#endif
/* Reboot notifier: resets the SCSI bus on halt/shutdown (see
 * oktagon_notify_reboot()). */
static struct notifier_block oktagon_notifier = {
        oktagon_notify_reboot,
        NULL,
        0
};

/* State of the single in-flight fake-DMA transfer (see dma_commit()). */
static long *paddress;          /* Oktagon DMA port (mirrors esp->dregs) */
static long *address;           /* host-memory base of the current transfer */
static long len;                /* bytes still to move; counted down */
static long dma_on;             /* cleared when dma_commit() finishes */
static int direction;           /* non-zero = write (memory to device) */
static struct NCR_ESP *current_esp;   /* the one ESP this driver serves */

static volatile unsigned char cmd_buffer[16];
/* This is where all commands are put
 * before they are transferred to the ESP chip
 * via PIO.
 */
/***************************************************************** Detection */
/*
 * Probe Zorro space for a bsc Oktagon 2008, verify the ESP responds at
 * the expected offset, wire up the (fake-)DMA hooks and register the
 * controller with the ESP core.  Returns the number of ESPs in use, or
 * 0 if none found.
 */
int oktagon_esp_detect(struct scsi_host_template *tpnt)
{
        struct NCR_ESP *esp;
        struct zorro_dev *z = NULL;
        unsigned long address;
        struct ESP_regs *eregs;

        while ((z = zorro_find_device(ZORRO_PROD_BSC_OKTAGON_2008, z))) {
            unsigned long board = z->resource.start;
            if (request_mem_region(board+OKTAGON_ESP_ADDR,
                                   sizeof(struct ESP_regs), "NCR53C9x")) {
                /*
                 * It is a SCSI controller.
                 * Hardwire Host adapter to SCSI ID 7
                 */
                address = (unsigned long)ZTWO_VADDR(board);
                eregs = (struct ESP_regs *)(address + OKTAGON_ESP_ADDR);

                /* This line was 5 lines lower */
                esp = esp_allocate(tpnt, (void *)board + OKTAGON_ESP_ADDR, 0);

                /* we have to shift the registers only one bit for oktagon */
                esp->shift = 1;

                /* Probe: write the parity/ID config and check it reads back. */
                esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7));
                udelay(5);
                if (esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7))
                        /* NOTE(review): this bail-out appears to leak the
                         * memory region requested above and the allocated
                         * esp — confirm. */
                        return 0; /* Bail out if address did not hold data */

                /* Do command transfer with programmed I/O */
                esp->do_pio_cmds = 1;

                /* Required functions */
                esp->dma_bytes_sent = &dma_bytes_sent;
                esp->dma_can_transfer = &dma_can_transfer;
                esp->dma_dump_state = &dma_dump_state;
                esp->dma_init_read = &dma_init_read;
                esp->dma_init_write = &dma_init_write;
                esp->dma_ints_off = &dma_ints_off;
                esp->dma_ints_on = &dma_ints_on;
                esp->dma_irq_p = &dma_irq_p;
                esp->dma_ports_p = &dma_ports_p;
                esp->dma_setup = &dma_setup;

                /* Optional functions */
                esp->dma_barrier = 0;
                esp->dma_drain = 0;
                esp->dma_invalidate = &dma_invalidate;
                esp->dma_irq_entry = 0;
                esp->dma_irq_exit = &dma_irq_exit;
                esp->dma_led_on = &dma_led_on;
                esp->dma_led_off = &dma_led_off;
                esp->dma_poll = 0;
                esp->dma_reset = 0;

                esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one;
                esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl;
                esp->dma_mmu_release_scsi_one = &dma_mmu_release_scsi_one;
                esp->dma_mmu_release_scsi_sgl = &dma_mmu_release_scsi_sgl;
                esp->dma_advance_sg = &dma_advance_sg;

                /* SCSI chip speed */
                /* Looking at the quartz of the SCSI board... */
                esp->cfreq = 25000000;

                /* The DMA registers on the CyberStorm are mapped
                 * relative to the device (i.e. in the same Zorro
                 * I/O block).
                 */
                esp->dregs = (void *)(address + OKTAGON_DMA_ADDR);
                paddress = (long *) esp->dregs;

                /* ESP register base */
                esp->eregs = eregs;

                /* Set the command buffer */
                esp->esp_command = (volatile unsigned char*) cmd_buffer;

                /* Yes, the virtual address. See below. */
                esp->esp_command_dvma = (__u32) cmd_buffer;

                esp->irq = IRQ_AMIGA_PORTS;
                /* NOTE(review): request_irq() return value is ignored. */
                request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
                            "BSC Oktagon SCSI", esp->ehost);

                /* Figure out our scsi ID on the bus */
                esp->scsi_id = 7;

                /* We don't have a differential SCSI-bus. */
                esp->diff = 0;

                esp_initialize(esp);

                printk("ESP_Oktagon Driver 1.1"
#ifdef USE_BOTTOM_HALF
                       " [BOTTOM_HALF]"
#else
                       " [IRQ]"
#endif
                       " registered.\n");

                printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps,esps_in_use);
                esps_running = esps_in_use;
                current_esp = esp;
                register_reboot_notifier(&oktagon_notifier);
                return esps_in_use;
            }
        }
        return 0;
}
/*
 * On certain configurations the SCSI equipment gets confused on
 * reboot, so the bus is reset on the way down.
 */
static int
oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
{
        struct NCR_ESP *esp = current_esp;

        if (esp && (code == SYS_DOWN || code == SYS_HALT)) {
                esp_bootup_reset(esp, esp->eregs);
                udelay(500);    /* Settle time. Maybe unnecessary. */
        }
        return NOTIFY_DONE;
}
#ifdef USE_BOTTOM_HALF
/*
* The bsc Oktagon controller has no real DMA, so we have to do the 'DMA
* transfer' in the interrupt (Yikes!) or use a bottom half to not to clutter
* IRQ's for longer-than-good.
*
* FIXME
* BIG PROBLEM: 'len' is usually the buffer length, not the expected length
* of the data. So DMA may finish prematurely, further reads lead to
* 'machine check' on APUS systems (don't know about m68k systems, AmigaOS
* deliberately ignores the bus faults) and a normal copy-loop can't
* be exited prematurely just at the right moment by the dma_invalidate IRQ.
* So do it the hard way, write an own copier in assembler and
* catch the exception.
* -- Carsten
*/
/*
 * Bottom-half worker implementing the Oktagon's 'fake DMA': the board
 * has no real DMA engine, so the transfer described by the file-scope
 * variables (address, paddress, len, direction) is pumped through the
 * DMA port by the CPU, with exponential back-off when the port stalls,
 * and the ESP interrupt state is sanity-checked afterwards (see the
 * FIXME above about 'len' being the buffer length, not the device's
 * expected length).
 */
static void dma_commit(struct work_struct *unused)
{
        long wait,len2,pos;
        struct NCR_ESP *esp;

        ESPDATA(("Transfer: %ld bytes, Address 0x%08lX, Direction: %d\n",
                 len,(long) address,direction));

        dma_ints_off(current_esp);

        pos = 0;
        wait = 1;
        if(direction) /* write? (memory to device) */
        {
                while(len > 0)
                {
                        /* oktag_to_io returns longs moved; 0 means the
                         * port stalled — back off exponentially, give up
                         * once the delay exceeds ~1s worth of retries. */
                        len2 = oktag_to_io(paddress, address+pos, len);
                        if(!len2)
                        {
                                if(wait > 1000)
                                {
                                        printk("Expedited DMA exit (writing) %ld\n",len);
                                        break;
                                }
                                mdelay(wait);
                                wait *= 2;
                        }
                        /* pos advances in longs, len counts down in bytes. */
                        pos += len2;
                        len -= len2*sizeof(long);
                }
        } else {
                while(len > 0)
                {
                        len2 = oktag_from_io(address+pos, paddress, len);
                        if(!len2)
                        {
                                if(wait > 1000)
                                {
                                        printk("Expedited DMA exit (reading) %ld\n",len);
                                        break;
                                }
                                mdelay(wait);
                                wait *= 2;
                        }
                        pos += len2;
                        len -= len2*sizeof(long);
                }
        }

        /* to make esp->shift work */
        esp=current_esp;

#if 0
        len2 = (esp_read(current_esp->eregs->esp_tclow) & 0xff) |
               ((esp_read(current_esp->eregs->esp_tcmed) & 0xff) << 8);

        /*
         * Uh uh. If you see this, len and transfer count registers were out of
         * sync. That means really serious trouble.
         */
        if(len2)
                printk("Eeeek!! Transfer count still %ld!\n",len2);
#endif

        /*
         * Normally we just need to exit and wait for the interrupt to come.
         * But at least one device (my Microtek ScanMaker 630) regularly mis-
         * calculates the bytes it should send which is really ugly because
         * it locks up the SCSI bus if not accounted for.
         */
        if(!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR))
        {
                long len = 100; /* NOTE: shadows the file-scope 'len'. */
                long trash[10];

                /*
                 * Interrupt bit was not set. Either the device is just plain lazy
                 * so we give it a 10 ms chance or...
                 */
                while(len-- && (!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR)))
                        udelay(100);

                if(!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR))
                {
                        /*
                         * So we think that the transfer count is out of sync. Since we
                         * have all we want we are happy and can ditch the trash.
                         */
                        len = DMA_MAXTRANSFER;

                        while(len-- && (!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR)))
                                oktag_from_io(trash,paddress,2);

                        if(!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR))
                        {
                                /*
                                 * Things really have gone wrong. If we leave the system in that
                                 * state, the SCSI bus is locked forever. I hope that this will
                                 * turn the system in a more or less running state.
                                 */
                                printk("Device is bolixed, trying bus reset...\n");
                                esp_bootup_reset(current_esp,current_esp->eregs);
                        }
                }
        }

        ESPDATA(("Transfer_finale: do_data_finale should come\n"));

        len = 0;
        dma_on = 0;
        dma_ints_on(current_esp);
}
#endif
/************************************************************* DMA Functions */
/*
 * Report how many bytes have already been handed to the ESP chip.
 * The processor copies data straight into the chip with no buffering
 * in between, so the FIFO count is already the answer.
 * XXXX Do I read this right? It is from host to ESP, right?
 */
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
{
	int sent = fifo_count;

	return sent;
}
/* Clamp the command's residual byte count to the controller maximum. */
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
	unsigned long residual = sp->SCp.this_residual;

	return (residual > DMA_MAXTRANSFER) ? DMA_MAXTRANSFER : residual;
}
/* No-op: there is no DMA controller state worth dumping here. */
static void dma_dump_state(struct NCR_ESP *esp)
{
}
/*
 * Workaround for misbehaving devices:
 *
 * Some SCSI devices (like my Microtek ScanMaker 630 scanner) want to transfer
 * more data than requested. How much? Dunno. So ditch the bogus data into
 * the sink, hoping the device will advance to the next phase sooner or later.
 *
 * -- Carsten
 */
static long oktag_eva_buffer[16]; /* The data sink */

/*
 * If a zero-length transfer was requested, redirect it into the sink
 * buffer and program a 2-byte transfer count into the chip, so the
 * device always has somewhere to dump its extra bytes.
 * Operates on the file-scope globals len/address/current_esp.
 */
static void oktag_check_dma(void)
{
	struct NCR_ESP *esp;

	/* Needed: the esp_write() macro expands to code referencing a
	 * local variable named 'esp'. */
	esp=current_esp;
	if(!len)
	{
		address = oktag_eva_buffer;
		len = 2;
		/* esp_do_data sets them to zero like len */
		esp_write(current_esp->eregs->esp_tclow,2);
		esp_write(current_esp->eregs->esp_tcmed,0);
	}
}
static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length)
{
/* Zorro is noncached, everything else done using processor. */
/* cache_clear(addr, length); */
if(dma_on)
panic("dma_init_read while dma process is initialized/running!\n");
direction = 0;
address = (long *) vaddress;
current_esp = esp;
len = length;
oktag_check_dma();
dma_on = 1;
}
static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length)
{
/* cache_push(addr, length); */
if(dma_on)
panic("dma_init_write while dma process is initialized/running!\n");
direction = 1;
address = (long *) vaddress;
current_esp = esp;
len = length;
oktag_check_dma();
dma_on = 1;
}
/* Mask the board's interrupt line while the CPU-driven copy runs. */
static void dma_ints_off(struct NCR_ESP *esp)
{
	int irq = esp->irq;

	disable_irq(irq);
}
/* Re-enable the board's interrupt line after the copy is done. */
static void dma_ints_on(struct NCR_ESP *esp)
{
	int irq = esp->irq;

	enable_irq(irq);
}
/*
 * Non-zero when the chip has an interrupt pending.
 * It's important to check the DMA IRQ bit in the correct way!
 */
static int dma_irq_p(struct NCR_ESP *esp)
{
	int status = esp_read(esp->eregs->esp_status);

	return status & ESP_STAT_INTR;
}
/* No-op: this driver does not drive an activity LED. */
static void dma_led_off(struct NCR_ESP *esp)
{
}
/* No-op: this driver does not drive an activity LED. */
static void dma_led_on(struct NCR_ESP *esp)
{
}
/*
 * Non-zero when the Amiga "ports" interrupt source is enabled;
 * returns the raw masked bit from the custom-chip enable register.
 */
static int dma_ports_p(struct NCR_ESP *esp)
{
	return amiga_custom.intenar & IF_PORTS;
}
/*
 * Program a transfer in the direction the core requests.
 * On the Sparc, DMA_ST_WRITE means "move data from device to memory",
 * so when (write) is true it actually means READ!
 */
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
{
	if (!write) {
		dma_init_write(esp, addr, count);
	} else {
		dma_init_read(esp, addr, count);
	}
}
/*
* IRQ entry when DMA transfer is ready to be started
*/
/*
 * Called when a transfer is ready to be started.  With USE_BOTTOM_HALF the
 * CPU copy is deferred to the dma_commit() worker; otherwise it is done
 * right here in interrupt context, one long at a time, until either the
 * byte count runs out or the chip raises its interrupt.
 * Operates on the file-scope globals len/address/paddress/direction/dma_on.
 */
static void dma_irq_exit(struct NCR_ESP *esp)
{
#ifdef USE_BOTTOM_HALF
	if(dma_on)
	{
		schedule_work(&tq_fake_dma);
	}
#else
	while(len && !dma_irq_p(esp))
	{
		if(direction)
			*paddress = *address++;	/* memory -> device */
		else
			*address++ = *paddress;	/* device -> memory */
		len -= (sizeof(long));
	}

	len = 0;
	dma_on = 0;
#endif
}
/*
* IRQ entry when DMA has just finished
*/
/* No-op: the processor did the copy itself, so nothing to invalidate. */
static void dma_invalidate(struct NCR_ESP *esp)
{
}
/*
* Since the processor does the data transfer we have to use the custom
* mmu interface to pass the virtual address, not the physical.
*/
/*
 * Since the processor does the data transfer, hand the core the
 * virtual address of the single buffer, not a physical one.
 */
void dma_mmu_get_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
	sp->SCp.ptr = sp->request_buffer;
}
/* Same for scatter-gather: point at the segment's kernel virtual mapping. */
void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
	sp->SCp.ptr = sg_virt(sp->SCp.buffer);
}
/* No-op: nothing was mapped, so there is nothing to release. */
void dma_mmu_release_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
}
/* No-op: nothing was mapped, so there is nothing to release. */
void dma_mmu_release_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
}
/* Move on to the next scatter-gather segment's virtual address. */
void dma_advance_sg(Scsi_Cmnd *sp)
{
	sp->SCp.ptr = sg_virt(sp->SCp.buffer);
}
#define HOSTS_C
/*
 * Host teardown for the Oktagon ESP: detach from the ESP core, release the
 * board's register region and IRQ, and drop the reboot notifier.
 * Only does real work when built as a module.  Always returns 1.
 */
int oktagon_esp_release(struct Scsi_Host *instance)
{
#ifdef MODULE
	/* Board register base, stashed in the driver's edev field. */
	unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
	esp_release();
	release_mem_region(address, sizeof(struct ESP_regs));
	free_irq(IRQ_AMIGA_PORTS, esp_intr);
	unregister_reboot_notifier(&oktagon_notifier);
#endif
	return 1;
}
/* SCSI mid-layer host template for the BSC Oktagon ESP adapter. */
static struct scsi_host_template driver_template = {
	.proc_name = "esp-oktagon",
	.proc_info = &esp_proc_info,
	.name = "BSC Oktagon SCSI",
	.detect = oktagon_esp_detect,
	.slave_alloc = esp_slave_alloc,
	.slave_destroy = esp_slave_destroy,
	.release = oktagon_esp_release,
	.queuecommand = esp_queue,
	.eh_abort_handler = esp_abort,
	.eh_bus_reset_handler = esp_reset,
	.can_queue = 7,			/* at most 7 commands outstanding */
	.this_id = 7,			/* host adapter's own SCSI ID */
	.sg_tablesize = SG_ALL,
	.cmd_per_lun = 1,		/* one command per LUN at a time */
	.use_clustering = ENABLE_CLUSTERING
};
#include "scsi_module.c"
MODULE_LICENSE("GPL");

View file

@ -1,194 +0,0 @@
/* -*- mode: asm -*-
* Due to problems while transferring data I've put these routines as assembly
* code.
* Since I'm no PPC assembler guru, the code is just the assembler version of
int oktag_to_io(long *paddr,long *addr,long len)
{
long *addr2 = addr;
for(len=(len+sizeof(long)-1)/sizeof(long);len--;)
*paddr = *addr2++;
return addr2 - addr;
}
int oktag_from_io(long *addr,long *paddr,long len)
{
long *addr2 = addr;
for(len=(len+sizeof(long)-1)/sizeof(long);len--;)
*addr2++ = *paddr;
return addr2 - addr;
}
* assembled using gcc -O2 -S, with two exception catch points where data
* is moved to/from the IO register.
*/
#ifdef CONFIG_APUS
.file "oktagon_io.c"
gcc2_compiled.:
/*
.section ".text"
*/
.align 2
.globl oktag_to_io
.type oktag_to_io,@function
oktag_to_io:
addi 5,5,3
srwi 5,5,2
cmpwi 1,5,0
mr 9,3
mr 3,4
addi 5,5,-1
bc 12,6,.L3
.L5:
cmpwi 1,5,0
lwz 0,0(3)
addi 3,3,4
addi 5,5,-1
exp1: stw 0,0(9)
bc 4,6,.L5
.L3:
ret1: subf 3,4,3
srawi 3,3,2
blr
.Lfe1:
.size oktag_to_io,.Lfe1-oktag_to_io
.align 2
.globl oktag_from_io
.type oktag_from_io,@function
oktag_from_io:
addi 5,5,3
srwi 5,5,2
cmpwi 1,5,0
mr 9,3
addi 5,5,-1
bc 12,6,.L9
.L11:
cmpwi 1,5,0
exp2: lwz 0,0(4)
addi 5,5,-1
stw 0,0(3)
addi 3,3,4
bc 4,6,.L11
.L9:
ret2: subf 3,9,3
srawi 3,3,2
blr
.Lfe2:
.size oktag_from_io,.Lfe2-oktag_from_io
.ident "GCC: (GNU) egcs-2.90.29 980515 (egcs-1.0.3 release)"
/*
* Exception table.
* Second longword shows where to jump when an exception at the addr the first
* longword is pointing to is caught.
*/
.section __ex_table,"a"
.align 2
oktagon_except:
.long exp1,ret1
.long exp2,ret2
#else
/*
The code which follows is for 680x0 based assembler and is meant for
Linux/m68k. It was created by cross compiling the code using the
instructions given above. I then added the four labels used in the
exception handler table at the bottom of this file.
- Kevin <kcozens@interlog.com>
*/
#ifdef CONFIG_AMIGA
.file "oktagon_io.c"
.version "01.01"
gcc2_compiled.:
.text
.align 2
.globl oktag_to_io
.type oktag_to_io,@function
oktag_to_io:
link.w %a6,#0
move.l %d2,-(%sp)
move.l 8(%a6),%a1
move.l 12(%a6),%d1
move.l %d1,%a0
move.l 16(%a6),%d0
addq.l #3,%d0
lsr.l #2,%d0
subq.l #1,%d0
moveq.l #-1,%d2
cmp.l %d0,%d2
jbeq .L3
.L5:
exp1:
move.l (%a0)+,(%a1)
dbra %d0,.L5
clr.w %d0
subq.l #1,%d0
jbcc .L5
.L3:
ret1:
move.l %a0,%d0
sub.l %d1,%d0
asr.l #2,%d0
move.l -4(%a6),%d2
unlk %a6
rts
.Lfe1:
.size oktag_to_io,.Lfe1-oktag_to_io
.align 2
.globl oktag_from_io
.type oktag_from_io,@function
oktag_from_io:
link.w %a6,#0
move.l %d2,-(%sp)
move.l 8(%a6),%d1
move.l 12(%a6),%a1
move.l %d1,%a0
move.l 16(%a6),%d0
addq.l #3,%d0
lsr.l #2,%d0
subq.l #1,%d0
moveq.l #-1,%d2
cmp.l %d0,%d2
jbeq .L9
.L11:
exp2:
move.l (%a1),(%a0)+
dbra %d0,.L11
clr.w %d0
subq.l #1,%d0
jbcc .L11
.L9:
ret2:
move.l %a0,%d0
sub.l %d1,%d0
asr.l #2,%d0
move.l -4(%a6),%d2
unlk %a6
rts
.Lfe2:
.size oktag_from_io,.Lfe2-oktag_from_io
.ident "GCC: (GNU) 2.7.2.1"
/*
* Exception table.
* Second longword shows where to jump when an exception at the addr the first
* longword is pointing to is caught.
*/
.section __ex_table,"a"
.align 2
oktagon_except:
.long exp1,ret1
.long exp2,ret2
#endif
#endif

View file

@ -35,7 +35,7 @@
#define BOUNCE_SIZE (64*1024)
#define PS3ROM_MAX_SECTORS (BOUNCE_SIZE / CD_FRAMESIZE)
#define PS3ROM_MAX_SECTORS (BOUNCE_SIZE >> 9)
struct ps3rom_private {

View file

@ -428,6 +428,19 @@ qla2x00_sysfs_read_sfp(struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
return 0;
if (ha->sfp_data)
goto do_read;
ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->sfp_data_dma);
if (!ha->sfp_data) {
qla_printk(KERN_WARNING, ha,
"Unable to allocate memory for SFP read-data.\n");
return 0;
}
do_read:
memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
addr = 0xa0;
for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
iter++, offset += SFP_BLOCK_SIZE) {
@ -835,7 +848,7 @@ qla2x00_get_host_port_id(struct Scsi_Host *shost)
static void
qla2x00_get_host_speed(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = shost_priv(shost);
scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
uint32_t speed = 0;
switch (ha->link_data_rate) {
@ -848,6 +861,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
case PORT_SPEED_4GB:
speed = 4;
break;
case PORT_SPEED_8GB:
speed = 8;
break;
}
fc_host_speed(shost) = speed;
}
@ -855,7 +871,7 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
static void
qla2x00_get_host_port_type(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = shost_priv(shost);
scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
uint32_t port_type = FC_PORTTYPE_UNKNOWN;
switch (ha->current_topology) {
@ -965,7 +981,7 @@ qla2x00_issue_lip(struct Scsi_Host *shost)
static struct fc_host_statistics *
qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = shost_priv(shost);
scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
int rval;
struct link_statistics *stats;
dma_addr_t stats_dma;
@ -1049,7 +1065,7 @@ qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
static void
qla2x00_get_host_port_state(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = shost_priv(shost);
scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
if (!ha->flags.online)
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;

View file

@ -2041,8 +2041,6 @@ typedef struct vport_params {
#define VP_RET_CODE_NO_MEM 5
#define VP_RET_CODE_NOT_FOUND 6
#define to_qla_parent(x) (((x)->parent) ? (x)->parent : (x))
/*
* ISP operations
*/

View file

@ -66,6 +66,7 @@ extern int ql2xqfullrampup;
extern int num_hosts;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
/*
* Global Functions in qla_mid.c source file.

View file

@ -925,6 +925,16 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
{
int rval;
uint32_t srisc_address = 0;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
unsigned long flags;
if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
/* Disable SRAM, Instruction RAM and GP RAM parity. */
spin_lock_irqsave(&ha->hardware_lock, flags);
WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
RD_REG_WORD(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/* Load firmware sequences */
rval = ha->isp_ops->load_risc(ha, &srisc_address);
@ -968,6 +978,19 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
}
}
if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
/* Enable proper parity. */
spin_lock_irqsave(&ha->hardware_lock, flags);
if (IS_QLA2300(ha))
/* SRAM parity */
WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
else
/* SRAM, Instruction RAM and GP RAM parity */
WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
RD_REG_WORD(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
if (rval) {
DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
ha->host_no));
@ -3213,9 +3236,6 @@ int
qla2x00_abort_isp(scsi_qla_host_t *ha)
{
int rval;
unsigned long flags = 0;
uint16_t cnt;
srb_t *sp;
uint8_t status = 0;
if (ha->flags.online) {
@ -3236,19 +3256,8 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
LOOP_DOWN_TIME);
}
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Requeue all commands in outstanding command list. */
for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
sp = ha->outstanding_cmds[cnt];
if (sp) {
ha->outstanding_cmds[cnt] = NULL;
sp->flags = 0;
sp->cmd->result = DID_RESET << 16;
sp->cmd->host_scribble = (unsigned char *)NULL;
qla2x00_sp_compl(ha, sp);
}
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
qla2x00_abort_all_cmds(ha, DID_RESET << 16);
ha->isp_ops->get_flash_version(ha, ha->request_ring);
@ -3273,6 +3282,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
if (ha->eft) {
memset(ha->eft, 0, EFT_SIZE);
rval = qla2x00_enable_eft_trace(ha,
ha->eft_dma, EFT_NUM_BUFFERS);
if (rval) {
@ -3357,60 +3367,15 @@ static int
qla2x00_restart_isp(scsi_qla_host_t *ha)
{
uint8_t status = 0;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
unsigned long flags = 0;
uint32_t wait_time;
/* If firmware needs to be loaded */
if (qla2x00_isp_firmware(ha)) {
ha->flags.online = 0;
if (!(status = ha->isp_ops->chip_diag(ha))) {
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
status = qla2x00_setup_chip(ha);
goto done;
}
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha) &&
!IS_QLA25XX(ha)) {
/*
* Disable SRAM, Instruction RAM and GP RAM
* parity.
*/
WRT_REG_WORD(&reg->hccr,
(HCCR_ENABLE_PARITY + 0x0));
RD_REG_WORD(&reg->hccr);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (!(status = ha->isp_ops->chip_diag(ha)))
status = qla2x00_setup_chip(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha) &&
!IS_QLA25XX(ha)) {
/* Enable proper parity */
if (IS_QLA2300(ha))
/* SRAM parity */
WRT_REG_WORD(&reg->hccr,
(HCCR_ENABLE_PARITY + 0x1));
else
/*
* SRAM, Instruction RAM and GP RAM
* parity.
*/
WRT_REG_WORD(&reg->hccr,
(HCCR_ENABLE_PARITY + 0x7));
RD_REG_WORD(&reg->hccr);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
}
done:
if (!status && !(status = qla2x00_init_rings(ha))) {
clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
if (!(status = qla2x00_fw_ready(ha))) {

View file

@ -119,6 +119,13 @@ static __inline__ void qla2x00_check_fabric_devices(scsi_qla_host_t *ha)
qla2x00_get_firmware_state(ha, &fw_state);
}
static __inline__ scsi_qla_host_t * to_qla_parent(scsi_qla_host_t *);
static __inline__ scsi_qla_host_t *
to_qla_parent(scsi_qla_host_t *ha)
{
return ha->parent ? ha->parent : ha;
}
/**
* qla2x00_issue_marker() - Issue a Marker IOCB if necessary.
* @ha: HA context

View file

@ -1815,6 +1815,8 @@ int
qla2x00_request_irqs(scsi_qla_host_t *ha)
{
int ret;
device_reg_t __iomem *reg = ha->iobase;
unsigned long flags;
/* If possible, enable MSI-X. */
if (!IS_QLA2432(ha) && !IS_QLA2532(ha))
@ -1846,7 +1848,7 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
DEBUG2(qla_printk(KERN_INFO, ha,
"MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
ha->fw_attributes));
return ret;
goto clear_risc_ints;
}
qla_printk(KERN_WARNING, ha,
"MSI-X: Falling back-to INTa mode -- %d.\n", ret);
@ -1864,15 +1866,30 @@ skip_msi:
ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
if (!ret) {
ha->flags.inta_enabled = 1;
ha->host->irq = ha->pdev->irq;
} else {
if (ret) {
qla_printk(KERN_WARNING, ha,
"Failed to reserve interrupt %d already in use.\n",
ha->pdev->irq);
goto fail;
}
ha->flags.inta_enabled = 1;
ha->host->irq = ha->pdev->irq;
clear_risc_ints:
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
} else {
WRT_REG_WORD(&reg->isp.semaphore, 0);
WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ha->isp_ops->enable_intrs(ha);
fail:
return ret;
}

View file

@ -980,7 +980,7 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
ha->host_no));
if (ha->fw_attributes & BIT_2)
if (ha->flags.npiv_supported)
mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
else
mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;

View file

@ -204,10 +204,8 @@ static int qla2x00_do_dpc(void *data);
static void qla2x00_rst_aen(scsi_qla_host_t *);
static uint8_t qla2x00_mem_alloc(scsi_qla_host_t *);
static int qla2x00_mem_alloc(scsi_qla_host_t *);
static void qla2x00_mem_free(scsi_qla_host_t *ha);
static int qla2x00_allocate_sp_pool( scsi_qla_host_t *ha);
static void qla2x00_free_sp_pool(scsi_qla_host_t *ha);
static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *);
/* -------------------------------------------------------------------------- */
@ -1117,6 +1115,27 @@ qla2x00_device_reset(scsi_qla_host_t *ha, fc_port_t *reset_fcport)
return ha->isp_ops->abort_target(reset_fcport);
}
void
qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res)
{
int cnt;
unsigned long flags;
srb_t *sp;
spin_lock_irqsave(&ha->hardware_lock, flags);
for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
sp = ha->outstanding_cmds[cnt];
if (sp) {
ha->outstanding_cmds[cnt] = NULL;
sp->flags = 0;
sp->cmd->result = res;
sp->cmd->host_scribble = (unsigned char *)NULL;
qla2x00_sp_compl(ha, sp);
}
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static int
qla2xxx_slave_alloc(struct scsi_device *sdev)
{
@ -1557,10 +1576,8 @@ static int __devinit
qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int ret = -ENODEV;
device_reg_t __iomem *reg;
struct Scsi_Host *host;
scsi_qla_host_t *ha;
unsigned long flags = 0;
char pci_info[30];
char fw_str[30];
struct scsi_host_template *sht;
@ -1608,6 +1625,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->parent = NULL;
ha->bars = bars;
ha->mem_only = mem_only;
spin_lock_init(&ha->hardware_lock);
/* Set ISP-type information. */
qla2x00_set_isp_flags(ha);
@ -1621,8 +1639,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
ha->iobase);
spin_lock_init(&ha->hardware_lock);
ha->prev_topology = 0;
ha->init_cb_size = sizeof(init_cb_t);
ha->mgmt_svr_loop_id = MANAGEMENT_SERVER + ha->vp_idx;
@ -1751,34 +1767,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
ha->host_no, ha));
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
reg = ha->iobase;
if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
} else {
WRT_REG_WORD(&reg->isp.semaphore, 0);
WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
/* Enable proper parity */
if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) {
if (IS_QLA2300(ha))
/* SRAM parity */
WRT_REG_WORD(&reg->isp.hccr,
(HCCR_ENABLE_PARITY + 0x1));
else
/* SRAM, Instruction RAM and GP RAM parity */
WRT_REG_WORD(&reg->isp.hccr,
(HCCR_ENABLE_PARITY + 0x7));
}
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ha->isp_ops->enable_intrs(ha);
pci_set_drvdata(pdev, ha);
ha->flags.init_done = 1;
@ -1848,10 +1836,14 @@ qla2x00_remove_one(struct pci_dev *pdev)
static void
qla2x00_free_device(scsi_qla_host_t *ha)
{
qla2x00_abort_all_cmds(ha, DID_NO_CONNECT << 16);
/* Disable timer */
if (ha->timer_active)
qla2x00_stop_timer(ha);
ha->flags.online = 0;
/* Kill the kernel thread for this host */
if (ha->dpc_thread) {
struct task_struct *t = ha->dpc_thread;
@ -1870,8 +1862,6 @@ qla2x00_free_device(scsi_qla_host_t *ha)
if (ha->eft)
qla2x00_disable_eft_trace(ha);
ha->flags.online = 0;
/* Stop currently executing firmware. */
qla2x00_try_to_stop_firmware(ha);
@ -2010,196 +2000,109 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
*
* Returns:
* 0 = success.
* 1 = failure.
* !0 = failure.
*/
static uint8_t
static int
qla2x00_mem_alloc(scsi_qla_host_t *ha)
{
char name[16];
uint8_t status = 1;
int retry= 10;
do {
/*
* This will loop only once if everything goes well, else some
* number of retries will be performed to get around a kernel
* bug where available mem is not allocated until after a
* little delay and a retry.
*/
ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
(ha->request_q_length + 1) * sizeof(request_t),
&ha->request_dma, GFP_KERNEL);
if (ha->request_ring == NULL) {
qla_printk(KERN_WARNING, ha,
"Memory Allocation failed - request_ring\n");
ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
(ha->request_q_length + 1) * sizeof(request_t), &ha->request_dma,
GFP_KERNEL);
if (!ha->request_ring)
goto fail;
qla2x00_mem_free(ha);
msleep(100);
ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
(ha->response_q_length + 1) * sizeof(response_t),
&ha->response_dma, GFP_KERNEL);
if (!ha->response_ring)
goto fail_free_request_ring;
continue;
}
ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
&ha->gid_list_dma, GFP_KERNEL);
if (!ha->gid_list)
goto fail_free_response_ring;
ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
(ha->response_q_length + 1) * sizeof(response_t),
&ha->response_dma, GFP_KERNEL);
if (ha->response_ring == NULL) {
qla_printk(KERN_WARNING, ha,
"Memory Allocation failed - response_ring\n");
ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
&ha->init_cb_dma, GFP_KERNEL);
if (!ha->init_cb)
goto fail_free_gid_list;
qla2x00_mem_free(ha);
msleep(100);
snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME,
ha->host_no);
ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
DMA_POOL_SIZE, 8, 0);
if (!ha->s_dma_pool)
goto fail_free_init_cb;
continue;
}
ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
if (!ha->srb_mempool)
goto fail_free_s_dma_pool;
ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
&ha->gid_list_dma, GFP_KERNEL);
if (ha->gid_list == NULL) {
qla_printk(KERN_WARNING, ha,
"Memory Allocation failed - gid_list\n");
/* Get memory for cached NVRAM */
ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
if (!ha->nvram)
goto fail_free_srb_mempool;
qla2x00_mem_free(ha);
msleep(100);
/* Allocate memory for SNS commands */
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
/* Get consistent memory allocated for SNS commands */
ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
if (!ha->sns_cmd)
goto fail_free_nvram;
} else {
/* Get consistent memory allocated for MS IOCB */
ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->ms_iocb_dma);
if (!ha->ms_iocb)
goto fail_free_nvram;
continue;
}
/* get consistent memory allocated for init control block */
ha->init_cb = dma_alloc_coherent(&ha->pdev->dev,
ha->init_cb_size, &ha->init_cb_dma, GFP_KERNEL);
if (ha->init_cb == NULL) {
qla_printk(KERN_WARNING, ha,
"Memory Allocation failed - init_cb\n");
qla2x00_mem_free(ha);
msleep(100);
continue;
}
memset(ha->init_cb, 0, ha->init_cb_size);
snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME,
ha->host_no);
ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
DMA_POOL_SIZE, 8, 0);
if (ha->s_dma_pool == NULL) {
qla_printk(KERN_WARNING, ha,
"Memory Allocation failed - s_dma_pool\n");
qla2x00_mem_free(ha);
msleep(100);
continue;
}
if (qla2x00_allocate_sp_pool(ha)) {
qla_printk(KERN_WARNING, ha,
"Memory Allocation failed - "
"qla2x00_allocate_sp_pool()\n");
qla2x00_mem_free(ha);
msleep(100);
continue;
}
/* Allocate memory for SNS commands */
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
/* Get consistent memory allocated for SNS commands */
ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma,
GFP_KERNEL);
if (ha->sns_cmd == NULL) {
/* error */
qla_printk(KERN_WARNING, ha,
"Memory Allocation failed - sns_cmd\n");
qla2x00_mem_free(ha);
msleep(100);
continue;
}
memset(ha->sns_cmd, 0, sizeof(struct sns_cmd_pkt));
} else {
/* Get consistent memory allocated for MS IOCB */
ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->ms_iocb_dma);
if (ha->ms_iocb == NULL) {
/* error */
qla_printk(KERN_WARNING, ha,
"Memory Allocation failed - ms_iocb\n");
qla2x00_mem_free(ha);
msleep(100);
continue;
}
memset(ha->ms_iocb, 0, sizeof(ms_iocb_entry_t));
/*
* Get consistent memory allocated for CT SNS
* commands
*/
ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
sizeof(struct ct_sns_pkt), &ha->ct_sns_dma,
GFP_KERNEL);
if (ha->ct_sns == NULL) {
/* error */
qla_printk(KERN_WARNING, ha,
"Memory Allocation failed - ct_sns\n");
qla2x00_mem_free(ha);
msleep(100);
continue;
}
memset(ha->ct_sns, 0, sizeof(struct ct_sns_pkt));
if (IS_FWI2_CAPABLE(ha)) {
/*
* Get consistent memory allocated for SFP
* block.
*/
ha->sfp_data = dma_pool_alloc(ha->s_dma_pool,
GFP_KERNEL, &ha->sfp_data_dma);
if (ha->sfp_data == NULL) {
qla_printk(KERN_WARNING, ha,
"Memory Allocation failed - "
"sfp_data\n");
qla2x00_mem_free(ha);
msleep(100);
continue;
}
memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
}
}
/* Get memory for cached NVRAM */
ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
if (ha->nvram == NULL) {
/* error */
qla_printk(KERN_WARNING, ha,
"Memory Allocation failed - nvram cache\n");
qla2x00_mem_free(ha);
msleep(100);
continue;
}
/* Done all allocations without any error. */
status = 0;
} while (retry-- && status != 0);
if (status) {
printk(KERN_WARNING
"%s(): **** FAILED ****\n", __func__);
/* Get consistent memory allocated for CT SNS commands */
ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
if (!ha->ct_sns)
goto fail_free_ms_iocb;
}
return(status);
return 0;
fail_free_ms_iocb:
dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
ha->ms_iocb = NULL;
ha->ms_iocb_dma = 0;
fail_free_nvram:
kfree(ha->nvram);
ha->nvram = NULL;
fail_free_srb_mempool:
mempool_destroy(ha->srb_mempool);
ha->srb_mempool = NULL;
fail_free_s_dma_pool:
dma_pool_destroy(ha->s_dma_pool);
ha->s_dma_pool = NULL;
fail_free_init_cb:
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
ha->init_cb_dma);
ha->init_cb = NULL;
ha->init_cb_dma = 0;
fail_free_gid_list:
dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
ha->gid_list_dma);
ha->gid_list = NULL;
ha->gid_list_dma = 0;
fail_free_response_ring:
dma_free_coherent(&ha->pdev->dev, (ha->response_q_length + 1) *
sizeof(response_t), ha->response_ring, ha->response_dma);
ha->response_ring = NULL;
ha->response_dma = 0;
fail_free_request_ring:
dma_free_coherent(&ha->pdev->dev, (ha->request_q_length + 1) *
sizeof(request_t), ha->request_ring, ha->request_dma);
ha->request_ring = NULL;
ha->request_dma = 0;
fail:
return -ENOMEM;
}
/*
@ -2215,14 +2118,8 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
struct list_head *fcpl, *fcptemp;
fc_port_t *fcport;
if (ha == NULL) {
/* error */
DEBUG2(printk("%s(): ERROR invalid ha pointer.\n", __func__));
return;
}
/* free sp pool */
qla2x00_free_sp_pool(ha);
if (ha->srb_mempool)
mempool_destroy(ha->srb_mempool);
if (ha->fce)
dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
@ -2270,6 +2167,7 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
(ha->request_q_length + 1) * sizeof(request_t),
ha->request_ring, ha->request_dma);
ha->srb_mempool = NULL;
ha->eft = NULL;
ha->eft_dma = 0;
ha->sns_cmd = NULL;
@ -2308,44 +2206,6 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
kfree(ha->nvram);
}
/*
* qla2x00_allocate_sp_pool
* This routine is called during initialization to allocate
* memory for local srb_t.
*
* Input:
* ha = adapter block pointer.
*
* Context:
* Kernel context.
*/
static int
qla2x00_allocate_sp_pool(scsi_qla_host_t *ha)
{
int rval;
rval = QLA_SUCCESS;
ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
if (ha->srb_mempool == NULL) {
qla_printk(KERN_INFO, ha, "Unable to allocate SRB mempool.\n");
rval = QLA_FUNCTION_FAILED;
}
return (rval);
}
/*
* This routine frees all adapter allocated memory.
*
*/
static void
qla2x00_free_sp_pool( scsi_qla_host_t *ha)
{
if (ha->srb_mempool) {
mempool_destroy(ha->srb_mempool);
ha->srb_mempool = NULL;
}
}
/**************************************************************************
* qla2x00_do_dpc
* This kernel thread is a task that is schedule by the interrupt handler
@ -2367,6 +2227,9 @@ qla2x00_do_dpc(void *data)
fc_port_t *fcport;
uint8_t status;
uint16_t next_loopid;
struct scsi_qla_host *vha;
int i;
ha = (scsi_qla_host_t *)data;
@ -2409,6 +2272,18 @@ qla2x00_do_dpc(void *data)
}
clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
}
for_each_mapped_vp_idx(ha, i) {
list_for_each_entry(vha, &ha->vp_list,
vp_list) {
if (i == vha->vp_idx) {
set_bit(ISP_ABORT_NEEDED,
&vha->dpc_flags);
break;
}
}
}
DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
ha->host_no));
}
@ -3029,3 +2904,4 @@ MODULE_FIRMWARE(FW_FILE_ISP22XX);
MODULE_FIRMWARE(FW_FILE_ISP2300);
MODULE_FIRMWARE(FW_FILE_ISP2322);
MODULE_FIRMWARE(FW_FILE_ISP24XX);
MODULE_FIRMWARE(FW_FILE_ISP25XX);

View file

@ -893,6 +893,8 @@ qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
}
}
#define PIO_REG(h, r) ((h)->pio_address + offsetof(struct device_reg_2xxx, r))
void
qla2x00_beacon_blink(struct scsi_qla_host *ha)
{
@ -902,15 +904,12 @@ qla2x00_beacon_blink(struct scsi_qla_host *ha)
unsigned long flags;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
if (ha->pio_address)
reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Save the Original GPIOE. */
if (ha->pio_address) {
gpio_enable = RD_REG_WORD_PIO(&reg->gpioe);
gpio_data = RD_REG_WORD_PIO(&reg->gpiod);
gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
} else {
gpio_enable = RD_REG_WORD(&reg->gpioe);
gpio_data = RD_REG_WORD(&reg->gpiod);
@ -920,7 +919,7 @@ qla2x00_beacon_blink(struct scsi_qla_host *ha)
gpio_enable |= GPIO_LED_MASK;
if (ha->pio_address) {
WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable);
WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
} else {
WRT_REG_WORD(&reg->gpioe, gpio_enable);
RD_REG_WORD(&reg->gpioe);
@ -936,7 +935,7 @@ qla2x00_beacon_blink(struct scsi_qla_host *ha)
/* Set the modified gpio_data values */
if (ha->pio_address) {
WRT_REG_WORD_PIO(&reg->gpiod, gpio_data);
WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
} else {
WRT_REG_WORD(&reg->gpiod, gpio_data);
RD_REG_WORD(&reg->gpiod);
@ -962,14 +961,11 @@ qla2x00_beacon_on(struct scsi_qla_host *ha)
return QLA_FUNCTION_FAILED;
}
if (ha->pio_address)
reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
/* Turn off LEDs. */
spin_lock_irqsave(&ha->hardware_lock, flags);
if (ha->pio_address) {
gpio_enable = RD_REG_WORD_PIO(&reg->gpioe);
gpio_data = RD_REG_WORD_PIO(&reg->gpiod);
gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
} else {
gpio_enable = RD_REG_WORD(&reg->gpioe);
gpio_data = RD_REG_WORD(&reg->gpiod);
@ -978,7 +974,7 @@ qla2x00_beacon_on(struct scsi_qla_host *ha)
/* Set the modified gpio_enable values. */
if (ha->pio_address) {
WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable);
WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
} else {
WRT_REG_WORD(&reg->gpioe, gpio_enable);
RD_REG_WORD(&reg->gpioe);
@ -987,7 +983,7 @@ qla2x00_beacon_on(struct scsi_qla_host *ha)
/* Clear out previously set LED colour. */
gpio_data &= ~GPIO_LED_MASK;
if (ha->pio_address) {
WRT_REG_WORD_PIO(&reg->gpiod, gpio_data);
WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
} else {
WRT_REG_WORD(&reg->gpiod, gpio_data);
RD_REG_WORD(&reg->gpiod);
@ -1244,13 +1240,12 @@ qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr)
if (ha->pio_address) {
uint16_t data2;
reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr);
WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr);
do {
data = RD_REG_WORD_PIO(&reg->flash_data);
data = RD_REG_WORD_PIO(PIO_REG(ha, flash_data));
barrier();
cpu_relax();
data2 = RD_REG_WORD_PIO(&reg->flash_data);
data2 = RD_REG_WORD_PIO(PIO_REG(ha, flash_data));
} while (data != data2);
} else {
WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
@ -1304,9 +1299,8 @@ qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data)
/* Always perform IO mapped accesses to the FLASH registers. */
if (ha->pio_address) {
reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr);
WRT_REG_WORD_PIO(&reg->flash_data, (uint16_t)data);
WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr);
WRT_REG_WORD_PIO(PIO_REG(ha, flash_data), (uint16_t)data);
} else {
WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */

View file

@ -7,7 +7,7 @@
/*
* Driver version
*/
#define QLA2XXX_VERSION "8.02.00-k7"
#define QLA2XXX_VERSION "8.02.00-k8"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 2

View file

@ -1306,6 +1306,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
atomic_set(&ddb_entry->relogin_timer, 0);
clear_bit(DF_RELOGIN, &ddb_entry->flags);
clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
iscsi_unblock_session(ddb_entry->sess);
iscsi_session_event(ddb_entry->sess,
ISCSI_KEVENT_CREATE_SESSION);
/*

View file

@ -63,8 +63,6 @@ static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
enum iscsi_param param, char *buf);
static int qla4xxx_host_get_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf);
static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag);
static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session);
/*
@ -91,6 +89,8 @@ static struct scsi_host_template qla4xxx_driver_template = {
.slave_alloc = qla4xxx_slave_alloc,
.slave_destroy = qla4xxx_slave_destroy,
.scan_finished = iscsi_scan_finished,
.this_id = -1,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
@ -116,8 +116,6 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
.get_conn_param = qla4xxx_conn_get_param,
.get_session_param = qla4xxx_sess_get_param,
.get_host_param = qla4xxx_host_get_param,
.start_conn = qla4xxx_conn_start,
.stop_conn = qla4xxx_conn_stop,
.session_recovery_timedout = qla4xxx_recovery_timedout,
};
@ -128,48 +126,19 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
struct ddb_entry *ddb_entry = session->dd_data;
struct scsi_qla_host *ha = ddb_entry->ha;
DEBUG2(printk("scsi%ld: %s: index [%d] port down retry count of (%d) "
"secs exhausted, marking device DEAD.\n", ha->host_no,
__func__, ddb_entry->fw_ddb_index,
ha->port_down_retry_count));
if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
DEBUG2(printk("scsi%ld: %s: index [%d] port down retry count "
"of (%d) secs exhausted, marking device DEAD.\n",
ha->host_no, __func__, ddb_entry->fw_ddb_index,
ha->port_down_retry_count));
DEBUG2(printk("scsi%ld: %s: scheduling dpc routine - dpc flags = "
"0x%lx\n", ha->host_no, __func__, ha->dpc_flags));
queue_work(ha->dpc_thread, &ha->dpc_work);
}
static int qla4xxx_conn_start(struct iscsi_cls_conn *conn)
{
struct iscsi_cls_session *session;
struct ddb_entry *ddb_entry;
session = iscsi_dev_to_session(conn->dev.parent);
ddb_entry = session->dd_data;
DEBUG2(printk("scsi%ld: %s: index [%d] starting conn\n",
ddb_entry->ha->host_no, __func__,
ddb_entry->fw_ddb_index));
iscsi_unblock_session(session);
return 0;
}
static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag)
{
struct iscsi_cls_session *session;
struct ddb_entry *ddb_entry;
session = iscsi_dev_to_session(conn->dev.parent);
ddb_entry = session->dd_data;
DEBUG2(printk("scsi%ld: %s: index [%d] stopping conn\n",
ddb_entry->ha->host_no, __func__,
ddb_entry->fw_ddb_index));
if (flag == STOP_CONN_RECOVER)
iscsi_block_session(session);
else
printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag);
DEBUG2(printk("scsi%ld: %s: scheduling dpc routine - dpc "
"flags = 0x%lx\n",
ha->host_no, __func__, ha->dpc_flags));
queue_work(ha->dpc_thread, &ha->dpc_work);
}
}
static int qla4xxx_host_get_param(struct Scsi_Host *shost,
@ -308,6 +277,9 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
return -ENOMEM;
}
/* finally ready to go */
iscsi_unblock_session(ddb_entry->sess);
return 0;
}
@ -364,6 +336,7 @@ void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
DEBUG3(printk("scsi%d:%d:%d: index [%d] marked MISSING\n",
ha->host_no, ddb_entry->bus, ddb_entry->target,
ddb_entry->fw_ddb_index));
iscsi_block_session(ddb_entry->sess);
iscsi_conn_error(ddb_entry->conn, ISCSI_ERR_CONN_FAILED);
}
@ -430,9 +403,21 @@ static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
{
struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
struct ddb_entry *ddb_entry = cmd->device->hostdata;
struct iscsi_cls_session *sess = ddb_entry->sess;
struct srb *srb;
int rval;
if (!sess) {
cmd->result = DID_IMM_RETRY << 16;
goto qc_fail_command;
}
rval = iscsi_session_chkready(sess);
if (rval) {
cmd->result = rval;
goto qc_fail_command;
}
if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
if (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD) {
cmd->result = DID_NO_CONNECT << 16;
@ -1323,7 +1308,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
ha->patch_number, ha->build_number);
scsi_scan_host(host);
return 0;
remove_host:

View file

@ -969,9 +969,10 @@ void starget_for_each_device(struct scsi_target *starget, void *data,
EXPORT_SYMBOL(starget_for_each_device);
/**
* __starget_for_each_device - helper to walk all devices of a target
* (UNLOCKED)
* __starget_for_each_device - helper to walk all devices of a target (UNLOCKED)
* @starget: target whose devices we want to iterate over.
* @data: parameter for callback @fn()
* @fn: callback function that is invoked for each device
*
* This traverses over each device of @starget. It does _not_
* take a reference on the scsi_device, so the whole loop must be

View file

@ -301,7 +301,6 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
page = sg_page(sg);
off = sg->offset;
len = sg->length;
data_len += len;
while (len > 0 && data_len > 0) {
/*

View file

@ -30,10 +30,10 @@
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/iscsi_if.h>
#define ISCSI_SESSION_ATTRS 18
#define ISCSI_CONN_ATTRS 11
#define ISCSI_SESSION_ATTRS 19
#define ISCSI_CONN_ATTRS 13
#define ISCSI_HOST_ATTRS 4
#define ISCSI_TRANSPORT_VERSION "2.0-867"
#define ISCSI_TRANSPORT_VERSION "2.0-868"
struct iscsi_internal {
int daemon_pid;
@ -127,12 +127,13 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
memset(ihost, 0, sizeof(*ihost));
INIT_LIST_HEAD(&ihost->sessions);
mutex_init(&ihost->mutex);
atomic_set(&ihost->nr_scans, 0);
snprintf(ihost->unbind_workq_name, KOBJ_NAME_LEN, "iscsi_unbind_%d",
snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d",
shost->host_no);
ihost->unbind_workq = create_singlethread_workqueue(
ihost->unbind_workq_name);
if (!ihost->unbind_workq)
ihost->scan_workq = create_singlethread_workqueue(
ihost->scan_workq_name);
if (!ihost->scan_workq)
return -ENOMEM;
return 0;
}
@ -143,7 +144,7 @@ static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
struct Scsi_Host *shost = dev_to_shost(dev);
struct iscsi_host *ihost = shost->shost_data;
destroy_workqueue(ihost->unbind_workq);
destroy_workqueue(ihost->scan_workq);
return 0;
}
@ -221,6 +222,54 @@ static struct iscsi_cls_conn *iscsi_conn_lookup(uint32_t sid, uint32_t cid)
* The following functions can be used by LLDs that allocate
* their own scsi_hosts or by software iscsi LLDs
*/
static struct {
int value;
char *name;
} iscsi_session_state_names[] = {
{ ISCSI_SESSION_LOGGED_IN, "LOGGED_IN" },
{ ISCSI_SESSION_FAILED, "FAILED" },
{ ISCSI_SESSION_FREE, "FREE" },
};
const char *iscsi_session_state_name(int state)
{
int i;
char *name = NULL;
for (i = 0; i < ARRAY_SIZE(iscsi_session_state_names); i++) {
if (iscsi_session_state_names[i].value == state) {
name = iscsi_session_state_names[i].name;
break;
}
}
return name;
}
int iscsi_session_chkready(struct iscsi_cls_session *session)
{
unsigned long flags;
int err;
spin_lock_irqsave(&session->lock, flags);
switch (session->state) {
case ISCSI_SESSION_LOGGED_IN:
err = 0;
break;
case ISCSI_SESSION_FAILED:
err = DID_IMM_RETRY << 16;
break;
case ISCSI_SESSION_FREE:
err = DID_NO_CONNECT << 16;
break;
default:
err = DID_NO_CONNECT << 16;
break;
}
spin_unlock_irqrestore(&session->lock, flags);
return err;
}
EXPORT_SYMBOL_GPL(iscsi_session_chkready);
static void iscsi_session_release(struct device *dev)
{
struct iscsi_cls_session *session = iscsi_dev_to_session(dev);
@ -236,6 +285,25 @@ static int iscsi_is_session_dev(const struct device *dev)
return dev->release == iscsi_session_release;
}
/**
* iscsi_scan_finished - helper to report when running scans are done
* @shost: scsi host
* @time: scan run time
*
* This function can be used by drives like qla4xxx to report to the scsi
* layer when the scans it kicked off at module load time are done.
*/
int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
struct iscsi_host *ihost = shost->shost_data;
/*
* qla4xxx will have kicked off some session unblocks before calling
* scsi_scan_host, so just wait for them to complete.
*/
return !atomic_read(&ihost->nr_scans);
}
EXPORT_SYMBOL_GPL(iscsi_scan_finished);
static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
uint id, uint lun)
{
@ -254,14 +322,50 @@ static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
return 0;
}
static void iscsi_scan_session(struct work_struct *work)
{
struct iscsi_cls_session *session =
container_of(work, struct iscsi_cls_session, scan_work);
struct Scsi_Host *shost = iscsi_session_to_shost(session);
struct iscsi_host *ihost = shost->shost_data;
unsigned long flags;
spin_lock_irqsave(&session->lock, flags);
if (session->state != ISCSI_SESSION_LOGGED_IN) {
spin_unlock_irqrestore(&session->lock, flags);
goto done;
}
spin_unlock_irqrestore(&session->lock, flags);
scsi_scan_target(&session->dev, 0, session->target_id,
SCAN_WILD_CARD, 1);
done:
atomic_dec(&ihost->nr_scans);
}
static void session_recovery_timedout(struct work_struct *work)
{
struct iscsi_cls_session *session =
container_of(work, struct iscsi_cls_session,
recovery_work.work);
unsigned long flags;
dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed "
"out after %d secs\n", session->recovery_tmo);
iscsi_cls_session_printk(KERN_INFO, session,
"session recovery timed out after %d secs\n",
session->recovery_tmo);
spin_lock_irqsave(&session->lock, flags);
switch (session->state) {
case ISCSI_SESSION_FAILED:
session->state = ISCSI_SESSION_FREE;
break;
case ISCSI_SESSION_LOGGED_IN:
case ISCSI_SESSION_FREE:
/* we raced with the unblock's flush */
spin_unlock_irqrestore(&session->lock, flags);
return;
}
spin_unlock_irqrestore(&session->lock, flags);
if (session->transport->session_recovery_timedout)
session->transport->session_recovery_timedout(session);
@ -269,16 +373,44 @@ static void session_recovery_timedout(struct work_struct *work)
scsi_target_unblock(&session->dev);
}
void iscsi_unblock_session(struct iscsi_cls_session *session)
void __iscsi_unblock_session(struct iscsi_cls_session *session)
{
if (!cancel_delayed_work(&session->recovery_work))
flush_workqueue(iscsi_eh_timer_workq);
scsi_target_unblock(&session->dev);
}
void iscsi_unblock_session(struct iscsi_cls_session *session)
{
struct Scsi_Host *shost = iscsi_session_to_shost(session);
struct iscsi_host *ihost = shost->shost_data;
unsigned long flags;
spin_lock_irqsave(&session->lock, flags);
session->state = ISCSI_SESSION_LOGGED_IN;
spin_unlock_irqrestore(&session->lock, flags);
__iscsi_unblock_session(session);
/*
* Only do kernel scanning if the driver is properly hooked into
* the async scanning code (drivers like iscsi_tcp do login and
* scanning from userspace).
*/
if (shost->hostt->scan_finished) {
if (queue_work(ihost->scan_workq, &session->scan_work))
atomic_inc(&ihost->nr_scans);
}
}
EXPORT_SYMBOL_GPL(iscsi_unblock_session);
void iscsi_block_session(struct iscsi_cls_session *session)
{
unsigned long flags;
spin_lock_irqsave(&session->lock, flags);
session->state = ISCSI_SESSION_FAILED;
spin_unlock_irqrestore(&session->lock, flags);
scsi_target_block(&session->dev);
queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
session->recovery_tmo * HZ);
@ -311,7 +443,7 @@ static int iscsi_unbind_session(struct iscsi_cls_session *session)
struct Scsi_Host *shost = iscsi_session_to_shost(session);
struct iscsi_host *ihost = shost->shost_data;
return queue_work(ihost->unbind_workq, &session->unbind_work);
return queue_work(ihost->scan_workq, &session->unbind_work);
}
struct iscsi_cls_session *
@ -327,10 +459,13 @@ iscsi_alloc_session(struct Scsi_Host *shost,
session->transport = transport;
session->recovery_tmo = 120;
session->state = ISCSI_SESSION_FREE;
INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
INIT_LIST_HEAD(&session->host_list);
INIT_LIST_HEAD(&session->sess_list);
INIT_WORK(&session->unbind_work, __iscsi_unbind_session);
INIT_WORK(&session->scan_work, iscsi_scan_session);
spin_lock_init(&session->lock);
/* this is released in the dev's release function */
scsi_host_get(shost);
@ -358,8 +493,8 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
session->sid);
err = device_add(&session->dev);
if (err) {
dev_printk(KERN_ERR, &session->dev, "iscsi: could not "
"register session's dev\n");
iscsi_cls_session_printk(KERN_ERR, session,
"could not register session's dev\n");
goto release_host;
}
transport_register_device(&session->dev);
@ -444,22 +579,28 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
* If we are blocked let commands flow again. The lld or iscsi
* layer should set up the queuecommand to fail commands.
*/
iscsi_unblock_session(session);
iscsi_unbind_session(session);
spin_lock_irqsave(&session->lock, flags);
session->state = ISCSI_SESSION_FREE;
spin_unlock_irqrestore(&session->lock, flags);
__iscsi_unblock_session(session);
__iscsi_unbind_session(&session->unbind_work);
/* flush running scans */
flush_workqueue(ihost->scan_workq);
/*
* If the session dropped while removing devices then we need to make
* sure it is not blocked
*/
if (!cancel_delayed_work(&session->recovery_work))
flush_workqueue(iscsi_eh_timer_workq);
flush_workqueue(ihost->unbind_workq);
/* hw iscsi may not have removed all connections from session */
err = device_for_each_child(&session->dev, NULL,
iscsi_iter_destroy_conn_fn);
if (err)
dev_printk(KERN_ERR, &session->dev, "iscsi: Could not delete "
"all connections for session. Error %d.\n", err);
iscsi_cls_session_printk(KERN_ERR, session,
"Could not delete all connections "
"for session. Error %d.\n", err);
transport_unregister_device(&session->dev);
device_del(&session->dev);
@ -531,8 +672,8 @@ iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
conn->dev.release = iscsi_conn_release;
err = device_register(&conn->dev);
if (err) {
dev_printk(KERN_ERR, &conn->dev, "iscsi: could not register "
"connection's dev\n");
iscsi_cls_session_printk(KERN_ERR, session, "could not "
"register connection's dev\n");
goto release_parent_ref;
}
transport_register_device(&conn->dev);
@ -639,8 +780,8 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb) {
iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED);
dev_printk(KERN_ERR, &conn->dev, "iscsi: can not deliver "
"control PDU: OOM\n");
iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver "
"control PDU: OOM\n");
return -ENOMEM;
}
@ -661,20 +802,27 @@ EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
{
struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
struct nlmsghdr *nlh;
struct sk_buff *skb;
struct iscsi_uevent *ev;
struct iscsi_internal *priv;
int len = NLMSG_SPACE(sizeof(*ev));
unsigned long flags;
priv = iscsi_if_transport_lookup(conn->transport);
if (!priv)
return;
spin_lock_irqsave(&session->lock, flags);
if (session->state == ISCSI_SESSION_LOGGED_IN)
session->state = ISCSI_SESSION_FAILED;
spin_unlock_irqrestore(&session->lock, flags);
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb) {
dev_printk(KERN_ERR, &conn->dev, "iscsi: gracefully ignored "
"conn error (%d)\n", error);
iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored "
"conn error (%d)\n", error);
return;
}
@ -688,8 +836,8 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
iscsi_broadcast_skb(skb, GFP_ATOMIC);
dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n",
error);
iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
error);
}
EXPORT_SYMBOL_GPL(iscsi_conn_error);
@ -744,8 +892,8 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
skbstat = alloc_skb(len, GFP_ATOMIC);
if (!skbstat) {
dev_printk(KERN_ERR, &conn->dev, "iscsi: can not "
"deliver stats: OOM\n");
iscsi_cls_conn_printk(KERN_ERR, conn, "can not "
"deliver stats: OOM\n");
return -ENOMEM;
}
@ -801,8 +949,9 @@ int iscsi_session_event(struct iscsi_cls_session *session,
skb = alloc_skb(len, GFP_KERNEL);
if (!skb) {
dev_printk(KERN_ERR, &session->dev, "Cannot notify userspace "
"of session event %u\n", event);
iscsi_cls_session_printk(KERN_ERR, session,
"Cannot notify userspace of session "
"event %u\n", event);
return -ENOMEM;
}
@ -825,8 +974,8 @@ int iscsi_session_event(struct iscsi_cls_session *session,
ev->r.unbind_session.sid = session->sid;
break;
default:
dev_printk(KERN_ERR, &session->dev, "Invalid event %u.\n",
event);
iscsi_cls_session_printk(KERN_ERR, session, "Invalid event "
"%u.\n", event);
kfree_skb(skb);
return -EINVAL;
}
@ -837,8 +986,10 @@ int iscsi_session_event(struct iscsi_cls_session *session,
*/
rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
if (rc < 0)
dev_printk(KERN_ERR, &session->dev, "Cannot notify userspace "
"of session event %u. Check iscsi daemon\n", event);
iscsi_cls_session_printk(KERN_ERR, session,
"Cannot notify userspace of session "
"event %u. Check iscsi daemon\n",
event);
return rc;
}
EXPORT_SYMBOL_GPL(iscsi_session_event);
@ -871,16 +1022,15 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
session = iscsi_session_lookup(ev->u.c_conn.sid);
if (!session) {
printk(KERN_ERR "iscsi: invalid session %d\n",
printk(KERN_ERR "iscsi: invalid session %d.\n",
ev->u.c_conn.sid);
return -EINVAL;
}
conn = transport->create_conn(session, ev->u.c_conn.cid);
if (!conn) {
printk(KERN_ERR "iscsi: couldn't create a new "
"connection for session %d\n",
session->sid);
iscsi_cls_session_printk(KERN_ERR, session,
"couldn't create a new connection.");
return -ENOMEM;
}
@ -1246,6 +1396,15 @@ iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
static ssize_t
show_priv_session_state(struct class_device *cdev, char *buf)
{
struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);
return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
}
static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
NULL);
#define iscsi_priv_session_attr_show(field, format) \
static ssize_t \
show_priv_session_##field(struct class_device *cdev, char *buf) \
@ -1472,6 +1631,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
SETUP_PRIV_SESSION_RD_ATTR(state);
BUG_ON(count > ISCSI_SESSION_ATTRS);
priv->session_attrs[count] = NULL;

View file

@ -929,6 +929,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
unsigned int xfer_size = scsi_bufflen(SCpnt);
unsigned int good_bytes = result ? 0 : xfer_size;
u64 start_lba = SCpnt->request->sector;
u64 end_lba = SCpnt->request->sector + (xfer_size / 512);
u64 bad_lba;
struct scsi_sense_hdr sshdr;
int sense_valid = 0;
@ -967,26 +968,23 @@ static int sd_done(struct scsi_cmnd *SCpnt)
goto out;
if (xfer_size <= SCpnt->device->sector_size)
goto out;
switch (SCpnt->device->sector_size) {
case 256:
if (SCpnt->device->sector_size < 512) {
/* only legitimate sector_size here is 256 */
start_lba <<= 1;
break;
case 512:
break;
case 1024:
start_lba >>= 1;
break;
case 2048:
start_lba >>= 2;
break;
case 4096:
start_lba >>= 3;
break;
default:
/* Print something here with limiting frequency. */
goto out;
break;
end_lba <<= 1;
} else {
/* be careful ... don't want any overflows */
u64 factor = SCpnt->device->sector_size / 512;
do_div(start_lba, factor);
do_div(end_lba, factor);
}
if (bad_lba < start_lba || bad_lba >= end_lba)
/* the bad lba was reported incorrectly, we have
* no idea where the error is
*/
goto out;
/* This computation should always be done in terms of
* the resolution of the device's medium.
*/

689
drivers/scsi/ses.c Normal file
View file

@ -0,0 +1,689 @@
/*
* SCSI Enclosure Services
*
* Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
*
**-----------------------------------------------------------------------------
**
** This program is free software; you can redistribute it and/or
** modify it under the terms of the GNU General Public License
** version 2 as published by the Free Software Foundation.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
**-----------------------------------------------------------------------------
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/enclosure.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_host.h>
struct ses_device {
char *page1;
char *page2;
char *page10;
short page1_len;
short page2_len;
short page10_len;
};
struct ses_component {
u64 addr;
unsigned char *desc;
};
static int ses_probe(struct device *dev)
{
struct scsi_device *sdev = to_scsi_device(dev);
int err = -ENODEV;
if (sdev->type != TYPE_ENCLOSURE)
goto out;
err = 0;
sdev_printk(KERN_NOTICE, sdev, "Attached Enclosure device\n");
out:
return err;
}
#define SES_TIMEOUT 30
#define SES_RETRIES 3
static int ses_recv_diag(struct scsi_device *sdev, int page_code,
void *buf, int bufflen)
{
char cmd[] = {
RECEIVE_DIAGNOSTIC,
1, /* Set PCV bit */
page_code,
bufflen >> 8,
bufflen & 0xff,
0
};
return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
NULL, SES_TIMEOUT, SES_RETRIES);
}
static int ses_send_diag(struct scsi_device *sdev, int page_code,
void *buf, int bufflen)
{
u32 result;
char cmd[] = {
SEND_DIAGNOSTIC,
0x10, /* Set PF bit */
0,
bufflen >> 8,
bufflen & 0xff,
0
};
result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
NULL, SES_TIMEOUT, SES_RETRIES);
if (result)
sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
result);
return result;
}
static int ses_set_page2_descriptor(struct enclosure_device *edev,
struct enclosure_component *ecomp,
char *desc)
{
int i, j, count = 0, descriptor = ecomp->number;
struct scsi_device *sdev = to_scsi_device(edev->cdev.dev);
struct ses_device *ses_dev = edev->scratch;
char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
char *desc_ptr = ses_dev->page2 + 8;
/* Clear everything */
memset(desc_ptr, 0, ses_dev->page2_len - 8);
for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) {
for (j = 0; j < type_ptr[1]; j++) {
desc_ptr += 4;
if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE)
continue;
if (count++ == descriptor) {
memcpy(desc_ptr, desc, 4);
/* set select */
desc_ptr[0] |= 0x80;
/* clear reserved, just in case */
desc_ptr[0] &= 0xf0;
}
}
}
return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
}
static char *ses_get_page2_descriptor(struct enclosure_device *edev,
struct enclosure_component *ecomp)
{
int i, j, count = 0, descriptor = ecomp->number;
struct scsi_device *sdev = to_scsi_device(edev->cdev.dev);
struct ses_device *ses_dev = edev->scratch;
char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
char *desc_ptr = ses_dev->page2 + 8;
ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) {
for (j = 0; j < type_ptr[1]; j++) {
desc_ptr += 4;
if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE)
continue;
if (count++ == descriptor)
return desc_ptr;
}
}
return NULL;
}
static void ses_get_fault(struct enclosure_device *edev,
struct enclosure_component *ecomp)
{
char *desc;
desc = ses_get_page2_descriptor(edev, ecomp);
ecomp->fault = (desc[3] & 0x60) >> 4;
}
static int ses_set_fault(struct enclosure_device *edev,
struct enclosure_component *ecomp,
enum enclosure_component_setting val)
{
char desc[4] = {0 };
switch (val) {
case ENCLOSURE_SETTING_DISABLED:
/* zero is disabled */
break;
case ENCLOSURE_SETTING_ENABLED:
desc[2] = 0x02;
break;
default:
/* SES doesn't do the SGPIO blink settings */
return -EINVAL;
}
return ses_set_page2_descriptor(edev, ecomp, desc);
}
static void ses_get_status(struct enclosure_device *edev,
struct enclosure_component *ecomp)
{
char *desc;
desc = ses_get_page2_descriptor(edev, ecomp);
ecomp->status = (desc[0] & 0x0f);
}
static void ses_get_locate(struct enclosure_device *edev,
struct enclosure_component *ecomp)
{
char *desc;
desc = ses_get_page2_descriptor(edev, ecomp);
ecomp->locate = (desc[2] & 0x02) ? 1 : 0;
}
static int ses_set_locate(struct enclosure_device *edev,
struct enclosure_component *ecomp,
enum enclosure_component_setting val)
{
char desc[4] = {0 };
switch (val) {
case ENCLOSURE_SETTING_DISABLED:
/* zero is disabled */
break;
case ENCLOSURE_SETTING_ENABLED:
desc[2] = 0x02;
break;
default:
/* SES doesn't do the SGPIO blink settings */
return -EINVAL;
}
return ses_set_page2_descriptor(edev, ecomp, desc);
}
static int ses_set_active(struct enclosure_device *edev,
struct enclosure_component *ecomp,
enum enclosure_component_setting val)
{
char desc[4] = {0 };
switch (val) {
case ENCLOSURE_SETTING_DISABLED:
/* zero is disabled */
ecomp->active = 0;
break;
case ENCLOSURE_SETTING_ENABLED:
desc[2] = 0x80;
ecomp->active = 1;
break;
default:
/* SES doesn't do the SGPIO blink settings */
return -EINVAL;
}
return ses_set_page2_descriptor(edev, ecomp, desc);
}
static struct enclosure_component_callbacks ses_enclosure_callbacks = {
.get_fault = ses_get_fault,
.set_fault = ses_set_fault,
.get_status = ses_get_status,
.get_locate = ses_get_locate,
.set_locate = ses_set_locate,
.set_active = ses_set_active,
};
struct ses_host_edev {
struct Scsi_Host *shost;
struct enclosure_device *edev;
};
int ses_match_host(struct enclosure_device *edev, void *data)
{
struct ses_host_edev *sed = data;
struct scsi_device *sdev;
if (!scsi_is_sdev_device(edev->cdev.dev))
return 0;
sdev = to_scsi_device(edev->cdev.dev);
if (sdev->host != sed->shost)
return 0;
sed->edev = edev;
return 1;
}
static void ses_process_descriptor(struct enclosure_component *ecomp,
unsigned char *desc)
{
int eip = desc[0] & 0x10;
int invalid = desc[0] & 0x80;
enum scsi_protocol proto = desc[0] & 0x0f;
u64 addr = 0;
struct ses_component *scomp = ecomp->scratch;
unsigned char *d;
scomp->desc = desc;
if (invalid)
return;
switch (proto) {
case SCSI_PROTOCOL_SAS:
if (eip)
d = desc + 8;
else
d = desc + 4;
/* only take the phy0 addr */
addr = (u64)d[12] << 56 |
(u64)d[13] << 48 |
(u64)d[14] << 40 |
(u64)d[15] << 32 |
(u64)d[16] << 24 |
(u64)d[17] << 16 |
(u64)d[18] << 8 |
(u64)d[19];
break;
default:
/* FIXME: Need to add more protocols than just SAS */
break;
}
scomp->addr = addr;
}
struct efd {
u64 addr;
struct device *dev;
};
static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
void *data)
{
struct efd *efd = data;
int i;
struct ses_component *scomp;
if (!edev->component[0].scratch)
return 0;
for (i = 0; i < edev->components; i++) {
scomp = edev->component[i].scratch;
if (scomp->addr != efd->addr)
continue;
enclosure_add_device(edev, i, efd->dev);
return 1;
}
return 0;
}
#define VPD_INQUIRY_SIZE 512
static void ses_match_to_enclosure(struct enclosure_device *edev,
struct scsi_device *sdev)
{
unsigned char *buf = kmalloc(VPD_INQUIRY_SIZE, GFP_KERNEL);
unsigned char *desc;
int len;
struct efd efd = {
.addr = 0,
};
unsigned char cmd[] = {
INQUIRY,
1,
0x83,
VPD_INQUIRY_SIZE >> 8,
VPD_INQUIRY_SIZE & 0xff,
0
};
if (!buf)
return;
if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf,
VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES))
goto free;
len = (buf[2] << 8) + buf[3];
desc = buf + 4;
while (desc < buf + len) {
enum scsi_protocol proto = desc[0] >> 4;
u8 code_set = desc[0] & 0x0f;
u8 piv = desc[1] & 0x80;
u8 assoc = (desc[1] & 0x30) >> 4;
u8 type = desc[1] & 0x0f;
u8 len = desc[3];
if (piv && code_set == 1 && assoc == 1 && code_set == 1
&& proto == SCSI_PROTOCOL_SAS && type == 3 && len == 8)
efd.addr = (u64)desc[4] << 56 |
(u64)desc[5] << 48 |
(u64)desc[6] << 40 |
(u64)desc[7] << 32 |
(u64)desc[8] << 24 |
(u64)desc[9] << 16 |
(u64)desc[10] << 8 |
(u64)desc[11];
desc += len + 4;
}
if (!efd.addr)
goto free;
efd.dev = &sdev->sdev_gendev;
enclosure_for_each_device(ses_enclosure_find_by_addr, &efd);
free:
kfree(buf);
}
#define INIT_ALLOC_SIZE 32

/*
 * Class interface add callback, run for every SCSI device.  For an
 * enclosure device: read the SES configuration (page 1), status
 * (page 2), additional element (page 10) and element descriptor
 * (page 7) diagnostic pages, register an enclosure with one component
 * per device slot, then scan the host for already-known devices that
 * belong in it.  For anything else: just check whether it lives in an
 * already-registered enclosure.
 *
 * Fixes over the previous version:
 *  - subenclosure message printed buf[1] while buf was still NULL;
 *    it now prints hdr_buf[1]
 *  - diagnostic page buffers are handed to ses_dev only after a
 *    successful read and buf is NULLed, so the err_free path can no
 *    longer free the same buffer twice
 *  - the page-7 allocation is checked before use
 *  - addl_desc_ptr is only derived from page 10 when page 10 exists
 *  - scomp is zeroed (addresses start at 0, not garbage) and freed if
 *    enclosure_register() fails
 */
static int ses_intf_add(struct class_device *cdev,
			struct class_interface *intf)
{
	struct scsi_device *sdev = to_scsi_device(cdev->dev);
	struct scsi_device *tmp_sdev;
	unsigned char *buf = NULL, *hdr_buf, *type_ptr, *desc_ptr,
		*addl_desc_ptr;
	struct ses_device *ses_dev;
	u32 result;
	int i, j, types, len, components = 0;
	int err = -ENOMEM;
	struct enclosure_device *edev;
	struct ses_component *scomp;

	if (!scsi_device_enclosure(sdev)) {
		/* not an enclosure, but might be in one */
		edev = enclosure_find(&sdev->host->shost_gendev);
		if (edev) {
			ses_match_to_enclosure(edev, sdev);
			class_device_put(&edev->cdev);
		}
		return -ENODEV;
	}

	/* TYPE_ENCLOSURE prints a message in probe */
	if (sdev->type != TYPE_ENCLOSURE)
		sdev_printk(KERN_NOTICE, sdev, "Embedded Enclosure Device\n");

	ses_dev = kzalloc(sizeof(*ses_dev), GFP_KERNEL);
	hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
	if (!hdr_buf || !ses_dev)
		goto err_init_free;

	result = ses_recv_diag(sdev, 1, hdr_buf, INIT_ALLOC_SIZE);
	if (result)
		goto recv_failed;
	if (hdr_buf[1] != 0) {
		/* FIXME: need subenclosure support; I've just never
		 * seen a device with subenclosures and it makes the
		 * traversal routines more complex */
		sdev_printk(KERN_ERR, sdev,
			"FIXME driver has no support for subenclosures (%d)\n",
			hdr_buf[1]);
		goto err_free;
	}

	len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		goto err_free;
	result = ses_recv_diag(sdev, 1, buf, len);
	if (result)
		goto recv_failed;
	/* ownership moves to ses_dev; drop our alias so the error
	 * paths cannot double free */
	ses_dev->page1 = buf;
	ses_dev->page1_len = len;
	buf = NULL;

	types = ses_dev->page1[10];
	len = ses_dev->page1[11];

	type_ptr = ses_dev->page1 + 12 + len;

	for (i = 0; i < types; i++, type_ptr += 4) {
		if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
		    type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE)
			components += type_ptr[1];
	}

	result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE);
	if (result)
		goto recv_failed;
	len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		goto err_free;
	/* make sure getting page 2 actually works */
	result = ses_recv_diag(sdev, 2, buf, len);
	if (result)
		goto recv_failed;
	ses_dev->page2 = buf;
	ses_dev->page2_len = len;
	buf = NULL;

	/* The additional information page --- allows us
	 * to match up the devices */
	result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE);
	if (result)
		goto no_page10;
	len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		goto err_free;
	result = ses_recv_diag(sdev, 10, buf, len);
	if (result)
		goto recv_failed;
	ses_dev->page10 = buf;
	ses_dev->page10_len = len;
	buf = NULL;

 no_page10:
	/* zeroed so unpopulated slots carry address 0, not garbage */
	scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
	if (!scomp)
		goto err_free;

	edev = enclosure_register(cdev->dev, sdev->sdev_gendev.bus_id,
				  components, &ses_enclosure_callbacks);
	if (IS_ERR(edev)) {
		err = PTR_ERR(edev);
		kfree(scomp);	/* not yet owned by edev */
		goto err_free;
	}

	edev->scratch = ses_dev;
	for (i = 0; i < components; i++)
		edev->component[i].scratch = scomp + i;

	/* Page 7 for the descriptors is optional */
	result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE);
	if (result)
		goto simple_populate;

	len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
	/* add 1 for trailing '\0' we'll use */
	buf = kzalloc(len + 1, GFP_KERNEL);
	if (!buf)
		goto simple_populate;
	result = ses_recv_diag(sdev, 7, buf, len);
	if (result) {
 simple_populate:
		kfree(buf);
		buf = NULL;
		desc_ptr = NULL;
		addl_desc_ptr = NULL;
	} else {
		desc_ptr = buf + 8;
		len = (desc_ptr[2] << 8) + desc_ptr[3];
		/* skip past overall descriptor */
		desc_ptr += len + 4;
		/* only valid when page 10 was actually read */
		addl_desc_ptr = ses_dev->page10 ?
			ses_dev->page10 + 8 : NULL;
	}
	type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
	components = 0;
	for (i = 0; i < types; i++, type_ptr += 4) {
		for (j = 0; j < type_ptr[1]; j++) {
			char *name = NULL;
			struct enclosure_component *ecomp;

			if (desc_ptr) {
				len = (desc_ptr[2] << 8) + desc_ptr[3];
				desc_ptr += 4;
				/* Add trailing zero - pushes into
				 * reserved space */
				desc_ptr[len] = '\0';
				name = desc_ptr;
			}
			/* NOTE(review): non-device element types fall
			 * through here without advancing desc_ptr by
			 * len, which looks suspect for enclosures with
			 * mixed element types — verify against SES-2
			 * page 7 layout before changing */
			if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
			    type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE)
				continue;

			ecomp =	enclosure_component_register(edev,
							     components++,
							     type_ptr[0],
							     name);

			if (desc_ptr) {
				desc_ptr += len;
				if (!IS_ERR(ecomp) && addl_desc_ptr)
					ses_process_descriptor(ecomp,
							       addl_desc_ptr);
				if (addl_desc_ptr)
					addl_desc_ptr += addl_desc_ptr[1] + 2;
			}
		}
	}
	kfree(buf);
	kfree(hdr_buf);

	/* see if there are any devices matching before
	 * we found the enclosure */
	shost_for_each_device(tmp_sdev, sdev->host) {
		if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev))
			continue;
		ses_match_to_enclosure(edev, tmp_sdev);
	}

	return 0;

 recv_failed:
	sdev_printk(KERN_ERR, sdev, "Failed to get diagnostic page 0x%x\n",
		    result);
	err = -ENODEV;
 err_free:
	kfree(buf);
	kfree(ses_dev->page10);
	kfree(ses_dev->page2);
	kfree(ses_dev->page1);
 err_init_free:
	kfree(ses_dev);
	kfree(hdr_buf);
	sdev_printk(KERN_ERR, sdev, "Failed to bind enclosure %d\n", err);
	return err;
}
/* scsi_driver remove callback.  Intentionally empty: the enclosure
 * teardown is driven from the class interface (ses_intf_remove), not
 * from the driver core. */
static int ses_remove(struct device *dev)
{
	return 0;
}
/*
 * Class interface remove callback: undo ses_intf_add() for an
 * enclosure device.  Frees the cached diagnostic pages and the
 * per-component scratch array, drops the reference taken by
 * enclosure_find() and unregisters the enclosure.
 */
static void ses_intf_remove(struct class_device *cdev,
			    struct class_interface *intf)
{
	struct scsi_device *sdev = to_scsi_device(cdev->dev);
	struct enclosure_device *edev;
	struct ses_device *ses_dev;

	if (!scsi_device_enclosure(sdev))
		return;

	edev = enclosure_find(cdev->dev);
	if (!edev)
		return;

	ses_dev = edev->scratch;
	edev->scratch = NULL;

	/* page10 was allocated in ses_intf_add but never freed here */
	kfree(ses_dev->page10);
	kfree(ses_dev->page1);
	kfree(ses_dev->page2);
	kfree(ses_dev);

	/* every component's scratch points into this one allocation */
	kfree(edev->component[0].scratch);

	class_device_put(&edev->cdev);
	enclosure_unregister(edev);
}
/* Hook into the SCSI device class: ses_intf_add()/ses_intf_remove()
 * are called for every SCSI device that appears or disappears. */
static struct class_interface ses_interface = {
	.add	= ses_intf_add,
	.remove	= ses_intf_remove,
};

/* Upper-level driver that claims devices of TYPE_ENCLOSURE. */
static struct scsi_driver ses_template = {
	.owner			= THIS_MODULE,
	.gendrv = {
		.name		= "ses",
		.probe		= ses_probe,
		.remove		= ses_remove,
	},
};
/* Module init: register the class interface first, then the SCSI
 * driver; unwind the interface registration if the driver fails. */
static int __init ses_init(void)
{
	int ret;

	ret = scsi_register_interface(&ses_interface);
	if (ret)
		return ret;

	ret = scsi_register_driver(&ses_template.gendrv);
	if (ret)
		scsi_unregister_interface(&ses_interface);

	return ret;
}
/* Module exit: tear down in reverse registration order. */
static void __exit ses_exit(void)
{
	scsi_unregister_driver(&ses_template.gendrv);
	scsi_unregister_interface(&ses_interface);
}

module_init(ses_init);
module_exit(ses_exit);

MODULE_ALIAS_SCSI_DEVICE(TYPE_ENCLOSURE);

MODULE_AUTHOR("James Bottomley");
MODULE_DESCRIPTION("SCSI Enclosure Services (ses) driver");
MODULE_LICENSE("GPL v2");

View file

@ -163,6 +163,29 @@ static void scsi_cd_put(struct scsi_cd *cd)
mutex_unlock(&sr_ref_mutex);
}
/* identical to scsi_test_unit_ready except that it doesn't
 * eat the NOT_READY returns for removable media */
int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr)
{
	int retries = MAX_RETRIES;
	int the_result;
	u8 cmd[] = {TEST_UNIT_READY, 0, 0, 0, 0, 0 };

	/* issue TEST_UNIT_READY until the initial startup UNIT_ATTENTION
	 * conditions are gone, or a timeout happens
	 */
	do {
		/* note: retries-- both bounds this loop and shrinks the
		 * per-command retry budget handed to scsi_execute_req on
		 * each successive attempt */
		the_result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL,
					      0, sshdr, SR_TIMEOUT,
					      retries--);

	} while (retries > 0 &&
		 (!scsi_status_is_good(the_result) ||
		  (scsi_sense_valid(sshdr) &&
		   sshdr->sense_key == UNIT_ATTENTION)));
	/* returns the raw SCSI status of the last attempt */
	return the_result;
}
/*
* This function checks to see if the media has been changed in the
* CDROM drive. It is possible that we have already sensed a change,
@ -185,8 +208,7 @@ static int sr_media_change(struct cdrom_device_info *cdi, int slot)
}
sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
retval = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES,
sshdr);
retval = sr_test_unit_ready(cd->device, sshdr);
if (retval || (scsi_sense_valid(sshdr) &&
/* 0x3a is medium not present */
sshdr->asc == 0x3a)) {
@ -733,10 +755,8 @@ static void get_capabilities(struct scsi_cd *cd)
{
unsigned char *buffer;
struct scsi_mode_data data;
unsigned char cmd[MAX_COMMAND_SIZE];
struct scsi_sense_hdr sshdr;
unsigned int the_result;
int retries, rc, n;
int rc, n;
static const char *loadmech[] =
{
@ -758,23 +778,8 @@ static void get_capabilities(struct scsi_cd *cd)
return;
}
/* issue TEST_UNIT_READY until the initial startup UNIT_ATTENTION
* conditions are gone, or a timeout happens
*/
retries = 0;
do {
memset((void *)cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = TEST_UNIT_READY;
the_result = scsi_execute_req (cd->device, cmd, DMA_NONE, NULL,
0, &sshdr, SR_TIMEOUT,
MAX_RETRIES);
retries++;
} while (retries < 5 &&
(!scsi_status_is_good(the_result) ||
(scsi_sense_valid(&sshdr) &&
sshdr.sense_key == UNIT_ATTENTION)));
/* eat unit attentions */
sr_test_unit_ready(cd->device, &sshdr);
/* ask for mode page 0x2a */
rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,

View file

@ -61,6 +61,7 @@ int sr_select_speed(struct cdrom_device_info *cdi, int speed);
int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
int sr_is_xa(Scsi_CD *);
int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr);
/* sr_vendor.c */
void sr_vendor_init(Scsi_CD *);

View file

@ -306,8 +306,7 @@ int sr_drive_status(struct cdrom_device_info *cdi, int slot)
/* we have no changer support */
return -EINVAL;
}
if (0 == scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES,
&sshdr))
if (0 == sr_test_unit_ready(cd->device, &sshdr))
return CDS_DISC_OK;
if (!cdrom_get_media_event(cdi, &med)) {

View file

@ -1,392 +1,316 @@
/* sun3x_esp.c: EnhancedScsiProcessor Sun3x SCSI driver code.
/* sun3x_esp.c: ESP front-end for Sun3x systems.
*
* (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
*
* Based on David S. Miller's esp driver
* Copyright (C) 2007,2008 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "NCR53C9x.h"
#include <asm/sun3x.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/dvma.h>
#include <asm/irq.h>
static void dma_barrier(struct NCR_ESP *esp);
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_drain(struct NCR_ESP *esp);
static void dma_invalidate(struct NCR_ESP *esp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length);
static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length);
static void dma_ints_off(struct NCR_ESP *esp);
static void dma_ints_on(struct NCR_ESP *esp);
static int dma_irq_p(struct NCR_ESP *esp);
static void dma_poll(struct NCR_ESP *esp, unsigned char *vaddr);
static int dma_ports_p(struct NCR_ESP *esp);
static void dma_reset(struct NCR_ESP *esp);
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp);
static void dma_advance_sg (Scsi_Cmnd *sp);
/* DMA controller reg offsets */
#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
/* Detecting ESP chips on the machine. This is the simple and easy
* version.
#include <scsi/scsi_host.h>
#include "esp_scsi.h"
#define DRV_MODULE_NAME "sun3x_esp"
#define PFX DRV_MODULE_NAME ": "
#define DRV_VERSION "1.000"
#define DRV_MODULE_RELDATE "Nov 1, 2007"
/*
* m68k always assumes readl/writel operate on little endian
* mmio space; this is wrong at least for Sun3x, so we
* need to workaround this until a proper way is found
*/
int sun3x_esp_detect(struct scsi_host_template *tpnt)
#if 0
#define dma_read32(REG) \
readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
writel((VAL), esp->dma_regs + (REG))
#else
#define dma_read32(REG) \
*(volatile u32 *)(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
do { *(volatile u32 *)(esp->dma_regs + (REG)) = (VAL); } while (0)
#endif
static void sun3x_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
struct NCR_ESP *esp;
struct ConfigDev *esp_dev;
esp_dev = 0;
esp = esp_allocate(tpnt, esp_dev, 0);
/* Do command transfer with DMA */
esp->do_pio_cmds = 0;
/* Required functions */
esp->dma_bytes_sent = &dma_bytes_sent;
esp->dma_can_transfer = &dma_can_transfer;
esp->dma_dump_state = &dma_dump_state;
esp->dma_init_read = &dma_init_read;
esp->dma_init_write = &dma_init_write;
esp->dma_ints_off = &dma_ints_off;
esp->dma_ints_on = &dma_ints_on;
esp->dma_irq_p = &dma_irq_p;
esp->dma_ports_p = &dma_ports_p;
esp->dma_setup = &dma_setup;
/* Optional functions */
esp->dma_barrier = &dma_barrier;
esp->dma_invalidate = &dma_invalidate;
esp->dma_drain = &dma_drain;
esp->dma_irq_entry = 0;
esp->dma_irq_exit = 0;
esp->dma_led_on = 0;
esp->dma_led_off = 0;
esp->dma_poll = &dma_poll;
esp->dma_reset = &dma_reset;
/* virtual DMA functions */
esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one;
esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl;
esp->dma_mmu_release_scsi_one = &dma_mmu_release_scsi_one;
esp->dma_mmu_release_scsi_sgl = &dma_mmu_release_scsi_sgl;
esp->dma_advance_sg = &dma_advance_sg;
/* SCSI chip speed */
esp->cfreq = 20000000;
esp->eregs = (struct ESP_regs *)(SUN3X_ESP_BASE);
esp->dregs = (void *)SUN3X_ESP_DMA;
esp->esp_command = (volatile unsigned char *)dvma_malloc(DVMA_PAGE_SIZE);
esp->esp_command_dvma = dvma_vtob((unsigned long)esp->esp_command);
esp->irq = 2;
if (request_irq(esp->irq, esp_intr, IRQF_DISABLED,
"SUN3X SCSI", esp->ehost)) {
esp_deallocate(esp);
return 0;
}
esp->scsi_id = 7;
esp->diff = 0;
esp_initialize(esp);
/* for reasons beyond my knowledge (and which should likely be fixed)
sync mode doesn't work on a 3/80 at 5mhz. but it does at 4. */
esp->sync_defp = 0x3f;
printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps,
esps_in_use);
esps_running = esps_in_use;
return esps_in_use;
writeb(val, esp->regs + (reg * 4UL));
}
static void dma_do_drain(struct NCR_ESP *esp)
static u8 sun3x_esp_read8(struct esp *esp, unsigned long reg)
{
struct sparc_dma_registers *dregs =
(struct sparc_dma_registers *) esp->dregs;
int count = 500000;
while((dregs->cond_reg & DMA_PEND_READ) && (--count > 0))
udelay(1);
if(!count) {
printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg);
}
dregs->cond_reg |= DMA_FIFO_STDRAIN;
count = 500000;
while((dregs->cond_reg & DMA_FIFO_ISDRAIN) && (--count > 0))
udelay(1);
if(!count) {
printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg);
}
}
static void dma_barrier(struct NCR_ESP *esp)
{
struct sparc_dma_registers *dregs =
(struct sparc_dma_registers *) esp->dregs;
int count = 500000;
while((dregs->cond_reg & DMA_PEND_READ) && (--count > 0))
udelay(1);
if(!count) {
printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg);
}
dregs->cond_reg &= ~(DMA_ENABLE);
return readb(esp->regs + (reg * 4UL));
}
/* This uses various DMA csr fields and the fifo flags count value to
* determine how many bytes were successfully sent/received by the ESP.
*/
static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
static dma_addr_t sun3x_esp_map_single(struct esp *esp, void *buf,
size_t sz, int dir)
{
struct sparc_dma_registers *dregs =
(struct sparc_dma_registers *) esp->dregs;
int rval = dregs->st_addr - esp->esp_command_dvma;
return rval - fifo_count;
return dma_map_single(esp->dev, buf, sz, dir);
}
static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
static int sun3x_esp_map_sg(struct esp *esp, struct scatterlist *sg,
int num_sg, int dir)
{
return sp->SCp.this_residual;
return dma_map_sg(esp->dev, sg, num_sg, dir);
}
static void dma_drain(struct NCR_ESP *esp)
static void sun3x_esp_unmap_single(struct esp *esp, dma_addr_t addr,
size_t sz, int dir)
{
struct sparc_dma_registers *dregs =
(struct sparc_dma_registers *) esp->dregs;
int count = 500000;
dma_unmap_single(esp->dev, addr, sz, dir);
}
if(dregs->cond_reg & DMA_FIFO_ISDRAIN) {
dregs->cond_reg |= DMA_FIFO_STDRAIN;
while((dregs->cond_reg & DMA_FIFO_ISDRAIN) && (--count > 0))
udelay(1);
if(!count) {
printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg);
static void sun3x_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
int num_sg, int dir)
{
dma_unmap_sg(esp->dev, sg, num_sg, dir);
}
static int sun3x_esp_irq_pending(struct esp *esp)
{
if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
return 1;
return 0;
}
/* esp_driver_ops hook: pulse the SCSI-reset bit in the DMA CSR to put
 * the DMA engine in a known state, then re-enable its interrupt. */
static void sun3x_esp_reset_dma(struct esp *esp)
{
	u32 val;

	val = dma_read32(DMA_CSR);
	dma_write32(val | DMA_RST_SCSI, DMA_CSR);
	dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);

	/* Enable interrupts.  */
	val = dma_read32(DMA_CSR);
	dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}
/* esp_driver_ops hook: if the DMA FIFO has residual data, start a
 * drain and wait (bounded) for it to complete. */
static void sun3x_esp_dma_drain(struct esp *esp)
{
	u32 csr;
	int lim;

	csr = dma_read32(DMA_CSR);
	/* nothing buffered, nothing to drain */
	if (!(csr & DMA_FIFO_ISDRAIN))
		return;

	dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);

	/* NOTE(review): this polls the CSR with no delay between reads,
	 * so the 1000-iteration limit is a very short timeout — confirm
	 * whether a udelay() per iteration was intended here */
	lim = 1000;
	while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
		if (--lim == 0) {
			printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
			       esp->host->unique_id);
			break;
		}
	}
}
static void dma_invalidate(struct NCR_ESP *esp)
{
struct sparc_dma_registers *dregs =
(struct sparc_dma_registers *) esp->dregs;
__u32 tmp;
int count = 500000;
while(((tmp = dregs->cond_reg) & DMA_PEND_READ) && (--count > 0))
udelay(1);
if(!count) {
printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg);
}
dregs->cond_reg = tmp | DMA_FIFO_INV;
dregs->cond_reg &= ~DMA_FIFO_INV;
}
static void dma_dump_state(struct NCR_ESP *esp)
static void sun3x_esp_dma_invalidate(struct esp *esp)
{
struct sparc_dma_registers *dregs =
(struct sparc_dma_registers *) esp->dregs;
u32 val;
int lim;
ESPLOG(("esp%d: dma -- cond_reg<%08lx> addr<%08lx>\n",
esp->esp_id, dregs->cond_reg, dregs->st_addr));
}
static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length)
{
struct sparc_dma_registers *dregs =
(struct sparc_dma_registers *) esp->dregs;
dregs->st_addr = vaddress;
dregs->cond_reg |= (DMA_ST_WRITE | DMA_ENABLE);
}
static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length)
{
struct sparc_dma_registers *dregs =
(struct sparc_dma_registers *) esp->dregs;
/* Set up the DMA counters */
dregs->st_addr = vaddress;
dregs->cond_reg = ((dregs->cond_reg & ~(DMA_ST_WRITE)) | DMA_ENABLE);
}
static void dma_ints_off(struct NCR_ESP *esp)
{
DMA_INTSOFF((struct sparc_dma_registers *) esp->dregs);
}
static void dma_ints_on(struct NCR_ESP *esp)
{
DMA_INTSON((struct sparc_dma_registers *) esp->dregs);
}
static int dma_irq_p(struct NCR_ESP *esp)
{
return DMA_IRQ_P((struct sparc_dma_registers *) esp->dregs);
}
static void dma_poll(struct NCR_ESP *esp, unsigned char *vaddr)
{
int count = 50;
dma_do_drain(esp);
/* Wait till the first bits settle. */
while((*(volatile unsigned char *)vaddr == 0xff) && (--count > 0))
lim = 1000;
while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
if (--lim == 0) {
printk(KERN_ALERT PFX "esp%d: DMA will not "
"invalidate!\n", esp->host->unique_id);
break;
}
udelay(1);
if(!count) {
// printk("%s:%d timeout expire (data %02x)\n", __FILE__, __LINE__,
// esp_read(esp->eregs->esp_fdata));
//mach_halt();
vaddr[0] = esp_read(esp->eregs->esp_fdata);
vaddr[1] = esp_read(esp->eregs->esp_fdata);
}
}
static int dma_ports_p(struct NCR_ESP *esp)
{
return (((struct sparc_dma_registers *) esp->dregs)->cond_reg
& DMA_INT_ENAB);
val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
val |= DMA_FIFO_INV;
dma_write32(val, DMA_CSR);
val &= ~DMA_FIFO_INV;
dma_write32(val, DMA_CSR);
}
/* Resetting various pieces of the ESP scsi driver chipset/buses. */
static void dma_reset(struct NCR_ESP *esp)
static void sun3x_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
u32 dma_count, int write, u8 cmd)
{
struct sparc_dma_registers *dregs =
(struct sparc_dma_registers *)esp->dregs;
u32 csr;
/* Punt the DVMA into a known state. */
dregs->cond_reg |= DMA_RST_SCSI;
dregs->cond_reg &= ~(DMA_RST_SCSI);
DMA_INTSON(dregs);
BUG_ON(!(cmd & ESP_CMD_DMA));
sun3x_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
sun3x_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
csr = dma_read32(DMA_CSR);
csr |= DMA_ENABLE;
if (write)
csr |= DMA_ST_WRITE;
else
csr &= ~DMA_ST_WRITE;
dma_write32(csr, DMA_CSR);
dma_write32(addr, DMA_ADDR);
scsi_esp_cmd(esp, cmd);
}
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
static int sun3x_esp_dma_error(struct esp *esp)
{
struct sparc_dma_registers *dregs =
(struct sparc_dma_registers *) esp->dregs;
unsigned long nreg = dregs->cond_reg;
u32 csr = dma_read32(DMA_CSR);
// printk("dma_setup %c addr %08x cnt %08x\n",
// write ? 'W' : 'R', addr, count);
if (csr & DMA_HNDL_ERROR)
return 1;
dma_do_drain(esp);
if(write)
nreg |= DMA_ST_WRITE;
else {
nreg &= ~(DMA_ST_WRITE);
}
nreg |= DMA_ENABLE;
dregs->cond_reg = nreg;
dregs->st_addr = addr;
return 0;
}
static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
sp->SCp.have_data_in = dvma_map((unsigned long)sp->SCp.buffer,
sp->SCp.this_residual);
sp->SCp.ptr = (char *)((unsigned long)sp->SCp.have_data_in);
}
static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
int sz = sp->SCp.buffers_residual;
struct scatterlist *sg = sp->SCp.buffer;
while (sz >= 0) {
sg[sz].dma_address = dvma_map((unsigned long)sg_virt(&sg[sz]),
sg[sz].length);
sz--;
}
sp->SCp.ptr=(char *)((unsigned long)sp->SCp.buffer->dma_address);
}
static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
dvma_unmap((char *)sp->SCp.have_data_in);
}
static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
int sz = sp->use_sg - 1;
struct scatterlist *sg = (struct scatterlist *)sp->request_buffer;
while(sz >= 0) {
dvma_unmap((char *)sg[sz].dma_address);
sz--;
}
}
static void dma_advance_sg (Scsi_Cmnd *sp)
{
sp->SCp.ptr = (char *)((unsigned long)sp->SCp.buffer->dma_address);
}
static int sun3x_esp_release(struct Scsi_Host *instance)
{
/* this code does not support being compiled as a module */
return 1;
}
static struct scsi_host_template driver_template = {
.proc_name = "sun3x_esp",
.proc_info = &esp_proc_info,
.name = "Sun ESP 100/100a/200",
.detect = sun3x_esp_detect,
.release = sun3x_esp_release,
.slave_alloc = esp_slave_alloc,
.slave_destroy = esp_slave_destroy,
.info = esp_info,
.queuecommand = esp_queue,
.eh_abort_handler = esp_abort,
.eh_bus_reset_handler = esp_reset,
.can_queue = 7,
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 1,
.use_clustering = DISABLE_CLUSTERING,
static const struct esp_driver_ops sun3x_esp_ops = {
.esp_write8 = sun3x_esp_write8,
.esp_read8 = sun3x_esp_read8,
.map_single = sun3x_esp_map_single,
.map_sg = sun3x_esp_map_sg,
.unmap_single = sun3x_esp_unmap_single,
.unmap_sg = sun3x_esp_unmap_sg,
.irq_pending = sun3x_esp_irq_pending,
.reset_dma = sun3x_esp_reset_dma,
.dma_drain = sun3x_esp_dma_drain,
.dma_invalidate = sun3x_esp_dma_invalidate,
.send_dma_cmd = sun3x_esp_send_dma_cmd,
.dma_error = sun3x_esp_dma_error,
};
/*
 * Platform probe: allocate a Scsi_Host with an embedded struct esp,
 * map the ESP and DMA register windows, allocate the command block,
 * claim the interrupt and hand the controller to the esp_scsi core.
 *
 * Fixes over the previous version:
 *  - the resource checks used "!res && !res->start", which dereferences
 *    a NULL res; the intended test is "||"
 *  - the second ioremap_nocache() result is now checked
 *  - a failed first ioremap no longer jumps to a label that iounmaps
 *    the NULL pointer it just failed to create
 */
static int __devinit esp_sun3x_probe(struct platform_device *dev)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	struct resource *res;
	int err = -ENOMEM;

	host = scsi_host_alloc(tpnt, sizeof(struct esp));
	if (!host)
		goto fail;

	host->max_id = 8;
	esp = shost_priv(host);

	esp->host = host;
	esp->dev = dev;
	esp->ops = &sun3x_esp_ops;

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!res || !res->start)
		goto fail_unlink;

	esp->regs = ioremap_nocache(res->start, 0x20);
	if (!esp->regs)
		goto fail_unlink;

	res = platform_get_resource(dev, IORESOURCE_MEM, 1);
	if (!res || !res->start)
		goto fail_unmap_regs;

	esp->dma_regs = ioremap_nocache(res->start, 0x10);
	if (!esp->dma_regs)
		goto fail_unmap_regs;

	esp->command_block = dma_alloc_coherent(esp->dev, 16,
						&esp->command_block_dma,
						GFP_KERNEL);
	if (!esp->command_block)
		goto fail_unmap_regs_dma;

	host->irq = platform_get_irq(dev, 0);
	err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
			  "SUN3X ESP", esp);
	if (err < 0)
		goto fail_unmap_command_block;

	esp->scsi_id = 7;
	esp->host->this_id = esp->scsi_id;
	esp->scsi_id_mask = (1 << esp->scsi_id);
	esp->cfreq = 20000000;

	dev_set_drvdata(&dev->dev, esp);

	err = scsi_esp_register(esp, &dev->dev);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);
fail_unmap_command_block:
	dma_free_coherent(esp->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
fail_unmap_regs_dma:
	iounmap(esp->dma_regs);
fail_unmap_regs:
	iounmap(esp->regs);
fail_unlink:
	scsi_host_put(host);
fail:
	return err;
}
/* Platform remove: unregister from the esp_scsi core, quiesce DMA
 * interrupts, then release the IRQ, command block and host.
 * NOTE(review): esp->regs and esp->dma_regs are not iounmap'd here —
 * confirm whether that is an intentional leak or an oversight. */
static int __devexit esp_sun3x_remove(struct platform_device *dev)
{
	struct esp *esp = dev_get_drvdata(&dev->dev);
	unsigned int irq = esp->host->irq;
	u32 val;

	scsi_esp_unregister(esp);

	/* Disable interrupts.  */
	val = dma_read32(DMA_CSR);
	dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);

	free_irq(irq, esp);

	dma_free_coherent(esp->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);

	scsi_host_put(esp->host);

	return 0;
}
static struct platform_driver esp_sun3x_driver = {
.probe = esp_sun3x_probe,
.remove = __devexit_p(esp_sun3x_remove),
.driver = {
.name = "sun3x_esp",
},
};
static int __init sun3x_esp_init(void)
{
return platform_driver_register(&esp_sun3x_driver);
}
static void __exit sun3x_esp_exit(void)
{
platform_driver_unregister(&esp_sun3x_driver);
}
MODULE_DESCRIPTION("Sun3x ESP SCSI driver");
MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
module_init(sun3x_esp_init);
module_exit(sun3x_esp_exit);

View file

@ -3842,7 +3842,7 @@ int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp)
if (cp->startp == cp->phys.head.lastp ||
sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp),
&dp_ofs) < 0) {
return cp->data_len;
return cp->data_len - cp->odd_byte_adjustment;
}
/*

View file

@ -1216,7 +1216,7 @@ static void scsi_to_dev_dir(unsigned int i, unsigned int j) {
cpp->xdir = DTD_IN;
return;
}
else if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) {
else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
cpp->xdir = DTD_OUT;
return;
}

129
include/linux/enclosure.h Normal file
View file

@ -0,0 +1,129 @@
/*
* Enclosure Services
*
* Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
*
**-----------------------------------------------------------------------------
**
** This program is free software; you can redistribute it and/or
** modify it under the terms of the GNU General Public License
** version 2 as published by the Free Software Foundation.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
**-----------------------------------------------------------------------------
*/
#ifndef _LINUX_ENCLOSURE_H_
#define _LINUX_ENCLOSURE_H_
#include <linux/device.h>
#include <linux/list.h>
/* A few generic types ... taken from ses-2 */
enum enclosure_component_type {
ENCLOSURE_COMPONENT_DEVICE = 0x01,
ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17,
};
/* ses-2 common element status */
enum enclosure_status {
ENCLOSURE_STATUS_UNSUPPORTED = 0,
ENCLOSURE_STATUS_OK,
ENCLOSURE_STATUS_CRITICAL,
ENCLOSURE_STATUS_NON_CRITICAL,
ENCLOSURE_STATUS_UNRECOVERABLE,
ENCLOSURE_STATUS_NOT_INSTALLED,
ENCLOSURE_STATUS_UNKNOWN,
ENCLOSURE_STATUS_UNAVAILABLE,
};
/* SFF-8485 activity light settings */
enum enclosure_component_setting {
ENCLOSURE_SETTING_DISABLED = 0,
ENCLOSURE_SETTING_ENABLED = 1,
ENCLOSURE_SETTING_BLINK_A_ON_OFF = 2,
ENCLOSURE_SETTING_BLINK_A_OFF_ON = 3,
ENCLOSURE_SETTING_BLINK_B_ON_OFF = 6,
ENCLOSURE_SETTING_BLINK_B_OFF_ON = 7,
};
struct enclosure_device;
struct enclosure_component;
struct enclosure_component_callbacks {
void (*get_status)(struct enclosure_device *,
struct enclosure_component *);
int (*set_status)(struct enclosure_device *,
struct enclosure_component *,
enum enclosure_status);
void (*get_fault)(struct enclosure_device *,
struct enclosure_component *);
int (*set_fault)(struct enclosure_device *,
struct enclosure_component *,
enum enclosure_component_setting);
void (*get_active)(struct enclosure_device *,
struct enclosure_component *);
int (*set_active)(struct enclosure_device *,
struct enclosure_component *,
enum enclosure_component_setting);
void (*get_locate)(struct enclosure_device *,
struct enclosure_component *);
int (*set_locate)(struct enclosure_device *,
struct enclosure_component *,
enum enclosure_component_setting);
};
struct enclosure_component {
void *scratch;
struct class_device cdev;
enum enclosure_component_type type;
int number;
int fault;
int active;
int locate;
enum enclosure_status status;
};
struct enclosure_device {
void *scratch;
struct list_head node;
struct class_device cdev;
struct enclosure_component_callbacks *cb;
int components;
struct enclosure_component component[0];
};
/* Recover the enclosure_device that embeds @dev (its class_device). */
static inline struct enclosure_device *
to_enclosure_device(struct class_device *dev)
{
	return container_of(dev, struct enclosure_device, cdev);
}

/* Recover the enclosure_component that embeds @dev (its class_device). */
static inline struct enclosure_component *
to_enclosure_component(struct class_device *dev)
{
	return container_of(dev, struct enclosure_component, cdev);
}
struct enclosure_device *
enclosure_register(struct device *, const char *, int,
struct enclosure_component_callbacks *);
void enclosure_unregister(struct enclosure_device *);
struct enclosure_component *
enclosure_component_register(struct enclosure_device *, unsigned int,
enum enclosure_component_type, const char *);
int enclosure_add_device(struct enclosure_device *enclosure, int component,
struct device *dev);
int enclosure_remove_device(struct enclosure_device *enclosure, int component);
struct enclosure_device *enclosure_find(struct device *dev);
int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *),
void *data);
#endif /* _LINUX_ENCLOSURE_H_ */

View file

@ -45,8 +45,8 @@
/* initiator tags; opaque for target */
typedef uint32_t __bitwise__ itt_t;
/* below makes sense only for initiator that created this tag */
#define build_itt(itt, id, age) ((__force itt_t)\
((itt) | ((id) << ISCSI_CID_SHIFT) | ((age) << ISCSI_AGE_SHIFT)))
#define build_itt(itt, age) ((__force itt_t)\
((itt) | ((age) << ISCSI_AGE_SHIFT)))
#define get_itt(itt) ((__force uint32_t)(itt_t)(itt) & ISCSI_ITT_MASK)
#define RESERVED_ITT ((__force itt_t)0xffffffff)

View file

@ -70,8 +70,6 @@ enum {
#define ISCSI_SUSPEND_BIT 1
#define ISCSI_ITT_MASK (0xfff)
#define ISCSI_CID_SHIFT 12
#define ISCSI_CID_MASK (0xffff << ISCSI_CID_SHIFT)
#define ISCSI_AGE_SHIFT 28
#define ISCSI_AGE_MASK (0xf << ISCSI_AGE_SHIFT)
@ -135,6 +133,14 @@ static inline void* iscsi_next_hdr(struct iscsi_cmd_task *ctask)
return (void*)ctask->hdr + ctask->hdr_len;
}
/* Connection's states */
enum {
ISCSI_CONN_INITIAL_STAGE,
ISCSI_CONN_STARTED,
ISCSI_CONN_STOPPED,
ISCSI_CONN_CLEANUP_WAIT,
};
struct iscsi_conn {
struct iscsi_cls_conn *cls_conn; /* ptr to class connection */
void *dd_data; /* iscsi_transport data */
@ -227,6 +233,17 @@ struct iscsi_pool {
int max; /* Max number of elements */
};
/* Session's states */
enum {
ISCSI_STATE_FREE = 1,
ISCSI_STATE_LOGGED_IN,
ISCSI_STATE_FAILED,
ISCSI_STATE_TERMINATE,
ISCSI_STATE_IN_RECOVERY,
ISCSI_STATE_RECOVERY_FAILED,
ISCSI_STATE_LOGGING_OUT,
};
struct iscsi_session {
/*
* Syncs up the scsi eh thread with the iscsi eh thread when sending
@ -325,6 +342,10 @@ extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
#define session_to_cls(_sess) \
hostdata_session(_sess->host->hostdata)
#define iscsi_session_printk(prefix, _sess, fmt, a...) \
iscsi_cls_session_printk(prefix, \
(struct iscsi_cls_session *)session_to_cls(_sess), fmt, ##a)
/*
* connection management
*/
@ -339,6 +360,9 @@ extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf);
#define iscsi_conn_printk(prefix, _c, fmt, a...) \
iscsi_cls_conn_printk(prefix, _c->cls_conn, fmt, ##a)
/*
* pdu and task processing
*/
@ -349,8 +373,6 @@ extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
char *, uint32_t);
extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
char *, int);
extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
char *, int);
extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *,
uint32_t *);
extern void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask);

View file

@ -235,6 +235,20 @@ static inline int scsi_status_is_good(int status)
#define TYPE_RBC 0x0e
#define TYPE_NO_LUN 0x7f
/* SCSI protocols; these are taken from SPC-3 section 7.5 */
enum scsi_protocol {
SCSI_PROTOCOL_FCP = 0, /* Fibre Channel */
SCSI_PROTOCOL_SPI = 1, /* parallel SCSI */
SCSI_PROTOCOL_SSA = 2, /* Serial Storage Architecture - Obsolete */
SCSI_PROTOCOL_SBP = 3, /* firewire */
SCSI_PROTOCOL_SRP = 4, /* Infiniband RDMA */
SCSI_PROTOCOL_ISCSI = 5,
SCSI_PROTOCOL_SAS = 6,
SCSI_PROTOCOL_ADT = 7, /* Media Changers */
SCSI_PROTOCOL_ATA = 8,
SCSI_PROTOCOL_UNSPEC = 0xf, /* No specific protocol */
};
/* Returns a human-readable name for the device */
extern const char * scsi_device_type(unsigned type);

View file

@ -280,39 +280,45 @@ struct scsi_host_template {
* If the host wants to be called before the scan starts, but
* after the midlayer has set up ready for the scan, it can fill
* in this function.
*
* Status: OPTIONAL
*/
void (* scan_start)(struct Scsi_Host *);
/*
* fill in this function to allow the queue depth of this host
* to be changeable (on a per device basis). returns either
* Fill in this function to allow the queue depth of this host
* to be changeable (on a per device basis). Returns either
* the current queue depth setting (may be different from what
* was passed in) or an error. An error should only be
* returned if the requested depth is legal but the driver was
* unable to set it. If the requested depth is illegal, the
* driver should set and return the closest legal queue depth.
*
* Status: OPTIONAL
*/
int (* change_queue_depth)(struct scsi_device *, int);
/*
* fill in this function to allow the changing of tag types
* Fill in this function to allow the changing of tag types
* (this also allows the enabling/disabling of tag command
* queueing). An error should only be returned if something
* went wrong in the driver while trying to set the tag type.
* If the driver doesn't support the requested tag type, then
* it should set the closest type it does support without
* returning an error. Returns the actual tag type set.
*
* Status: OPTIONAL
*/
int (* change_queue_type)(struct scsi_device *, int);
/*
* This function determines the bios parameters for a given
* This function determines the BIOS parameters for a given
* harddisk. These tend to be numbers that are made up by
* the host adapter. Parameters:
* size, device, list (heads, sectors, cylinders)
*
* Status: OPTIONAL */
* Status: OPTIONAL
*/
int (* bios_param)(struct scsi_device *, struct block_device *,
sector_t, int []);
@ -351,7 +357,7 @@ struct scsi_host_template {
/*
* This determines if we will use a non-interrupt driven
* or an interrupt driven scheme, It is set to the maximum number
* or an interrupt driven scheme. It is set to the maximum number
* of simultaneous commands a given host adapter will accept.
*/
int can_queue;
@ -372,12 +378,12 @@ struct scsi_host_template {
unsigned short sg_tablesize;
/*
* If the host adapter has limitations beside segment count
* Set this if the host adapter has limitations beside segment count.
*/
unsigned short max_sectors;
/*
* dma scatter gather segment boundary limit. a segment crossing this
* DMA scatter gather segment boundary limit. A segment crossing this
* boundary will be split in two.
*/
unsigned long dma_boundary;
@ -386,7 +392,7 @@ struct scsi_host_template {
* This specifies "machine infinity" for host templates which don't
* limit the transfer size. Note this limit represents an absolute
* maximum, and may be over the transfer limits allowed for
* individual devices (e.g. 256 for SCSI-1)
* individual devices (e.g. 256 for SCSI-1).
*/
#define SCSI_DEFAULT_MAX_SECTORS 1024
@ -413,12 +419,12 @@ struct scsi_host_template {
unsigned supported_mode:2;
/*
* true if this host adapter uses unchecked DMA onto an ISA bus.
* True if this host adapter uses unchecked DMA onto an ISA bus.
*/
unsigned unchecked_isa_dma:1;
/*
* true if this host adapter can make good use of clustering.
* True if this host adapter can make good use of clustering.
* I originally thought that if the tablesize was large that it
* was a waste of CPU cycles to prepare a cluster list, but
* it works out that the Buslogic is faster if you use a smaller
@ -428,7 +434,7 @@ struct scsi_host_template {
unsigned use_clustering:1;
/*
* True for emulated SCSI host adapters (e.g. ATAPI)
* True for emulated SCSI host adapters (e.g. ATAPI).
*/
unsigned emulated:1;
@ -438,12 +444,12 @@ struct scsi_host_template {
unsigned skip_settle_delay:1;
/*
* ordered write support
* True if we are using ordered write support.
*/
unsigned ordered_tag:1;
/*
* Countdown for host blocking with no commands outstanding
* Countdown for host blocking with no commands outstanding.
*/
unsigned int max_host_blocked;
@ -522,8 +528,8 @@ struct Scsi_Host {
struct scsi_transport_template *transportt;
/*
* area to keep a shared tag map (if needed, will be
* NULL if not)
* Area to keep a shared tag map (if needed, will be
* NULL if not).
*/
struct blk_queue_tag *bqt;
@ -596,16 +602,16 @@ struct Scsi_Host {
/*
* Host uses correct SCSI ordering not PC ordering. The bit is
* set for the minority of drivers whose authors actually read
* the spec ;)
* the spec ;).
*/
unsigned reverse_ordering:1;
/*
* ordered write support
* Ordered write support
*/
unsigned ordered_tag:1;
/* task mgmt function in progress */
/* Task mgmt function in progress */
unsigned tmf_in_progress:1;
/* Asynchronous scan in progress */

View file

@ -149,13 +149,6 @@ extern void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error);
extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
char *data, uint32_t data_size);
/* Connection's states */
#define ISCSI_CONN_INITIAL_STAGE 0
#define ISCSI_CONN_STARTED 1
#define ISCSI_CONN_STOPPED 2
#define ISCSI_CONN_CLEANUP_WAIT 3
struct iscsi_cls_conn {
struct list_head conn_list; /* item in connlist */
void *dd_data; /* LLD private data */
@ -169,27 +162,31 @@ struct iscsi_cls_conn {
#define iscsi_dev_to_conn(_dev) \
container_of(_dev, struct iscsi_cls_conn, dev)
/* Session's states */
#define ISCSI_STATE_FREE 1
#define ISCSI_STATE_LOGGED_IN 2
#define ISCSI_STATE_FAILED 3
#define ISCSI_STATE_TERMINATE 4
#define ISCSI_STATE_IN_RECOVERY 5
#define ISCSI_STATE_RECOVERY_FAILED 6
#define ISCSI_STATE_LOGGING_OUT 7
#define iscsi_conn_to_session(_conn) \
iscsi_dev_to_session(_conn->dev.parent)
/* iscsi class session state */
enum {
ISCSI_SESSION_LOGGED_IN,
ISCSI_SESSION_FAILED,
ISCSI_SESSION_FREE,
};
struct iscsi_cls_session {
struct list_head sess_list; /* item in session_list */
struct list_head host_list;
struct iscsi_transport *transport;
spinlock_t lock;
struct work_struct scan_work;
struct work_struct unbind_work;
/* recovery fields */
int recovery_tmo;
struct delayed_work recovery_work;
struct work_struct unbind_work;
int target_id;
int state;
int sid; /* session id */
void *dd_data; /* LLD private data */
struct device dev; /* sysfs transport/container device */
@ -206,14 +203,22 @@ struct iscsi_cls_session {
struct iscsi_host {
struct list_head sessions;
atomic_t nr_scans;
struct mutex mutex;
struct workqueue_struct *unbind_workq;
char unbind_workq_name[KOBJ_NAME_LEN];
struct workqueue_struct *scan_workq;
char scan_workq_name[KOBJ_NAME_LEN];
};
/*
* session and connection functions that can be used by HW iSCSI LLDs
*/
#define iscsi_cls_session_printk(prefix, _cls_session, fmt, a...) \
dev_printk(prefix, &(_cls_session)->dev, fmt, ##a)
#define iscsi_cls_conn_printk(prefix, _cls_conn, fmt, a...) \
dev_printk(prefix, &(_cls_conn)->dev, fmt, ##a)
extern int iscsi_session_chkready(struct iscsi_cls_session *session);
extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
struct iscsi_transport *transport);
extern int iscsi_add_session(struct iscsi_cls_session *session,
@ -231,6 +236,6 @@ extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
extern void iscsi_unblock_session(struct iscsi_cls_session *session);
extern void iscsi_block_session(struct iscsi_cls_session *session);
extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
#endif