Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev: (258 commits)
  [libata] conversion to new debug scheme, part 1 of $N
  [PATCH] libata: Add ata_scsi_dev_disabled
  [libata] Add host lock to struct ata_port
  [PATCH] libata: implement per-dev EH action mask eh_info->dev_action[]
  [PATCH] libata-dev: move the CDB-intr DMA blacklisting
  [PATCH] ahci: disable NCQ support on vt8251
  [libata] ahci: add JMicron PCI IDs
  [libata] sata_nv: add PCI IDs
  [libata] ahci: Add NVIDIA PCI IDs.
  [PATCH] libata: convert several bmdma-style controllers to new EH, take #3
  [PATCH] sata_via: convert to new EH, take #3
  [libata] sata_nv: s/spin_lock_irqsave/spin_lock/ in irq handler
  [PATCH] sata_nv: add hotplug support
  [PATCH] sata_nv: convert to new EH
  [PATCH] sata_nv: better irq handlers
  [PATCH] sata_nv: simplify constants
  [PATCH] sata_nv: kill struct nv_host_desc and nv_host
  [PATCH] sata_nv: kill not-working hotplug code
  [libata] Update docs to reflect current driver API
  [PATCH] libata: add host_set->next for legacy two host_sets case, take #3
  ...
Linus Torvalds 2006-06-23 15:58:44 -07:00
commit 6edad161cd
32 changed files with 6577 additions and 2405 deletions


@ -169,6 +169,22 @@ void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
</sect2>
<sect2><title>PIO data read/write</title>
<programlisting>
void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
</programlisting>
<para>
All bmdma-style drivers must implement this hook. This is the low-level
operation that actually copies the data bytes during a PIO data
transfer.
Typically the driver
will choose one of ata_pio_data_xfer_noirq(), ata_pio_data_xfer(), or
ata_mmio_data_xfer().
</para>
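<para>
As a minimal sketch (the example_port_ops name is a placeholder, not part of
this patch), a driver that relies on the stock helpers simply points the hook
at one of them in its ata_port_operations table:
</para>
<programlisting>
/* hypothetical port_ops fragment; reuses a stock PIO transfer helper */
static const struct ata_port_operations example_port_ops = {
	/* ... other hooks ... */
	.data_xfer	= ata_pio_data_xfer,	/* port I/O based taskfiles */
	/* or: .data_xfer = ata_mmio_data_xfer, for memory-mapped taskfiles */
};
</programlisting>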
</sect2>
<sect2><title>ATA command execute</title>
<programlisting>
void (*exec_command)(struct ata_port *ap, struct ata_taskfile *tf);
@ -204,11 +220,10 @@ command.
<programlisting>
u8 (*check_status)(struct ata_port *ap);
u8 (*check_altstatus)(struct ata_port *ap);
u8 (*check_err)(struct ata_port *ap);
</programlisting>
<para>
Reads the Status/AltStatus/Error ATA shadow register from
Reads the Status/AltStatus ATA shadow register from
hardware. On some hardware, reading the Status register has
the side effect of clearing the interrupt condition.
Most drivers for taskfile-based hardware use
@ -269,23 +284,6 @@ void (*set_mode) (struct ata_port *ap);
</sect2>
<sect2><title>Reset ATA bus</title>
<programlisting>
void (*phy_reset) (struct ata_port *ap);
</programlisting>
<para>
The very first step in the probe phase. Actions vary depending
on the bus type, typically. After waking up the device and probing
for device presence (PATA and SATA), typically a soft reset
(SRST) will be performed. Drivers typically use the helper
functions ata_bus_reset() or sata_phy_reset() for this hook.
Many SATA drivers use sata_phy_reset() or call it from within
their own phy_reset() functions.
</para>
</sect2>
<sect2><title>Control PCI IDE BMDMA engine</title>
<programlisting>
void (*bmdma_setup) (struct ata_queued_cmd *qc);
@ -354,16 +352,74 @@ int (*qc_issue) (struct ata_queued_cmd *qc);
</sect2>
<sect2><title>Timeout (error) handling</title>
<sect2><title>Exception and probe handling (EH)</title>
<programlisting>
void (*eng_timeout) (struct ata_port *ap);
void (*phy_reset) (struct ata_port *ap);
</programlisting>
<para>
This is a high level error handling function, called from the
error handling thread, when a command times out. Most newer
hardware will implement its own error handling code here. IDE BMDMA
drivers may use the helper function ata_eng_timeout().
Deprecated. Use ->error_handler() instead.
</para>
<programlisting>
void (*freeze) (struct ata_port *ap);
void (*thaw) (struct ata_port *ap);
</programlisting>
<para>
ata_port_freeze() is called when HSM violations or some other
condition disrupts normal operation of the port. A frozen port
is not allowed to perform any operation until the port is
thawed, which usually follows a successful reset.
</para>
<para>
The optional ->freeze() callback can be used to freeze the port at the
hardware level (e.g. mask interrupts and stop the DMA engine). If a
port cannot be frozen in hardware, the interrupt handler must ack and
clear interrupts unconditionally while the port is frozen.
</para>
<para>
The optional ->thaw() callback performs the opposite of ->freeze(): it
prepares the port for normal operation once again by unmasking
interrupts, restarting the DMA engine, and so on.
</para>
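<para>
As a rough sketch, modeled on the ahci_freeze()/ahci_thaw() pair added
further down in this patch (names prefixed example_/EXAMPLE_ are
hypothetical), a minimal MMIO implementation only has to mask and later
re-enable the port's interrupt sources:
</para>
<programlisting>
static void example_freeze(struct ata_port *ap)
{
	void __iomem *port_mmio = example_port_base(ap);	/* hypothetical helper */

	/* mask all port interrupts; EH polls the port while it is frozen */
	writel(0, port_mmio + EXAMPLE_PORT_IRQ_MASK);
}

static void example_thaw(struct ata_port *ap)
{
	void __iomem *port_mmio = example_port_base(ap);
	u32 pending;

	/* ack whatever arrived while frozen, then unmask */
	pending = readl(port_mmio + EXAMPLE_PORT_IRQ_STAT);
	writel(pending, port_mmio + EXAMPLE_PORT_IRQ_STAT);
	writel(EXAMPLE_DEF_PORT_IRQ, port_mmio + EXAMPLE_PORT_IRQ_MASK);
}
</programlisting>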
<programlisting>
void (*error_handler) (struct ata_port *ap);
</programlisting>
<para>
->error_handler() is a driver's hook into probe, hotplug, recovery, and
other exceptional conditions. The primary responsibility of an
implementation is to call ata_do_eh() or ata_bmdma_drive_eh() with a set
of EH hooks as arguments:
</para>
<para>
The 'prereset' hook (may be NULL) is called during an EH reset, before any
other actions are taken.
</para>
<para>
Based on existing conditions, the severity of the problem, and hardware
capabilities, either the 'softreset' hook (may be NULL) or the 'hardreset'
hook (may be NULL) is then called to perform the low-level EH reset.
</para>
<para>
The 'postreset' hook (may be NULL) is called after the EH reset has been
performed.
</para>
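<para>
For a typical bmdma-style driver the whole hook reduces to a single call;
the sketch below mirrors the piix_pata_error_handler()/piix_sata_error_handler()
implementations added later in this patch (the example_ name is a placeholder):
</para>
<programlisting>
static void example_error_handler(struct ata_port *ap)
{
	/* stock prereset/softreset/postreset, no hardreset for plain PATA */
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, NULL,
			   ata_std_postreset);
}
</programlisting>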
<programlisting>
void (*post_internal_cmd) (struct ata_queued_cmd *qc);
</programlisting>
<para>
Perform any hardware-specific actions necessary to finish processing
after executing a probe-time or EH-time command via ata_exec_internal().
</para>
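<para>
Most bmdma-style drivers can simply plug in ata_bmdma_post_internal_cmd()
(added later in this patch), which boils down to quiescing the DMA engine;
roughly:
</para>
<programlisting>
/* approximately what the stock helper does */
static void example_post_internal_cmd(struct ata_queued_cmd *qc)
{
	ata_bmdma_stop(qc);	/* make sure the DMA engine has stopped */
}
</programlisting>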
</sect2>


@ -74,6 +74,7 @@ static struct amd_ide_chip {
{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, 0x50, AMD_UDMA_133 },
{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, 0x50, AMD_UDMA_133 },
{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, 0x50, AMD_UDMA_133 },
{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, 0x50, AMD_UDMA_133 },
{ PCI_DEVICE_ID_AMD_CS5536_IDE, 0x40, AMD_UDMA_100 },
{ 0 }
};
@ -488,7 +489,8 @@ static ide_pci_device_t amd74xx_chipsets[] __devinitdata = {
/* 14 */ DECLARE_NV_DEV("NFORCE-MCP04"),
/* 15 */ DECLARE_NV_DEV("NFORCE-MCP51"),
/* 16 */ DECLARE_NV_DEV("NFORCE-MCP55"),
/* 17 */ DECLARE_AMD_DEV("AMD5536"),
/* 17 */ DECLARE_NV_DEV("NFORCE-MCP61"),
/* 18 */ DECLARE_AMD_DEV("AMD5536"),
};
static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id)
@ -525,7 +527,8 @@ static struct pci_device_id amd74xx_pci_tbl[] = {
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16 },
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17 },
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 18 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl);


@ -165,7 +165,7 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
zalon7xx-objs := zalon.o ncr53c8xx.o
NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
libata-objs := libata-core.o libata-scsi.o libata-bmdma.o
libata-objs := libata-core.o libata-scsi.o libata-bmdma.o libata-eh.o
oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o
# Files generated that shall be removed upon make clean


@ -48,7 +48,7 @@
#include <asm/io.h>
#define DRV_NAME "ahci"
#define DRV_VERSION "1.2"
#define DRV_VERSION "1.3"
enum {
@ -56,12 +56,15 @@ enum {
AHCI_MAX_SG = 168, /* hardware max is 64K */
AHCI_DMA_BOUNDARY = 0xffffffff,
AHCI_USE_CLUSTERING = 0,
AHCI_CMD_SLOT_SZ = 32 * 32,
AHCI_MAX_CMDS = 32,
AHCI_CMD_SZ = 32,
AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
AHCI_RX_FIS_SZ = 256,
AHCI_CMD_TBL_HDR = 0x80,
AHCI_CMD_TBL_CDB = 0x40,
AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR + (AHCI_MAX_SG * 16),
AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_SZ +
AHCI_CMD_TBL_HDR_SZ = 0x80,
AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
AHCI_RX_FIS_SZ,
AHCI_IRQ_ON_SG = (1 << 31),
AHCI_CMD_ATAPI = (1 << 5),
@ -71,8 +74,10 @@ enum {
AHCI_CMD_CLR_BUSY = (1 << 10),
RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
board_ahci = 0,
board_ahci_vt8251 = 1,
/* global controller registers */
HOST_CAP = 0x00, /* host capabilities */
@ -87,8 +92,9 @@ enum {
HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
/* HOST_CAP bits */
HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
HOST_CAP_CLO = (1 << 24), /* Command List Override support */
HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
/* registers for each SATA port */
PORT_LST_ADDR = 0x00, /* command list DMA addr */
@ -127,15 +133,17 @@ enum {
PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
PORT_IRQ_FATAL = PORT_IRQ_TF_ERR |
PORT_IRQ_HBUS_ERR |
PORT_IRQ_HBUS_DATA_ERR |
PORT_IRQ_IF_ERR,
DEF_PORT_IRQ = PORT_IRQ_FATAL | PORT_IRQ_PHYRDY |
PORT_IRQ_CONNECT | PORT_IRQ_SG_DONE |
PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_FIS |
PORT_IRQ_DMAS_FIS | PORT_IRQ_PIOS_FIS |
PORT_IRQ_D2H_REG_FIS,
PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
PORT_IRQ_IF_ERR |
PORT_IRQ_CONNECT |
PORT_IRQ_PHYRDY |
PORT_IRQ_UNK_FIS,
PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
PORT_IRQ_TF_ERR |
PORT_IRQ_HBUS_DATA_ERR,
DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
/* PORT_CMD bits */
PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
@ -153,6 +161,10 @@ enum {
/* hpriv->flags bits */
AHCI_FLAG_MSI = (1 << 0),
/* ap->flags bits */
AHCI_FLAG_RESET_NEEDS_CLO = (1 << 24),
AHCI_FLAG_NO_NCQ = (1 << 25),
};
struct ahci_cmd_hdr {
@ -181,7 +193,6 @@ struct ahci_port_priv {
dma_addr_t cmd_slot_dma;
void *cmd_tbl;
dma_addr_t cmd_tbl_dma;
struct ahci_sg *cmd_tbl_sg;
void *rx_fis;
dma_addr_t rx_fis_dma;
};
@ -191,15 +202,16 @@ static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes);
static void ahci_irq_clear(struct ata_port *ap);
static void ahci_eng_timeout(struct ata_port *ap);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void ahci_qc_prep(struct ata_queued_cmd *qc);
static u8 ahci_check_status(struct ata_port *ap);
static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
static void ahci_error_handler(struct ata_port *ap);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static void ahci_remove_one (struct pci_dev *pdev);
static struct scsi_host_template ahci_sht = {
@ -207,7 +219,8 @@ static struct scsi_host_template ahci_sht = {
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.change_queue_depth = ata_scsi_change_queue_depth,
.can_queue = AHCI_MAX_CMDS - 1,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = AHCI_MAX_SG,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
@ -216,6 +229,7 @@ static struct scsi_host_template ahci_sht = {
.proc_name = DRV_NAME,
.dma_boundary = AHCI_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
};
@ -228,19 +242,21 @@ static const struct ata_port_operations ahci_ops = {
.tf_read = ahci_tf_read,
.probe_reset = ahci_probe_reset,
.qc_prep = ahci_qc_prep,
.qc_issue = ahci_qc_issue,
.eng_timeout = ahci_eng_timeout,
.irq_handler = ahci_interrupt,
.irq_clear = ahci_irq_clear,
.scr_read = ahci_scr_read,
.scr_write = ahci_scr_write,
.freeze = ahci_freeze,
.thaw = ahci_thaw,
.error_handler = ahci_error_handler,
.post_internal_cmd = ahci_post_internal_cmd,
.port_start = ahci_port_start,
.port_stop = ahci_port_stop,
};
@ -250,7 +266,19 @@ static const struct ata_port_info ahci_port_info[] = {
{
.sht = &ahci_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
ATA_FLAG_SKIP_D2H_BSY,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
.port_ops = &ahci_ops,
},
/* board_ahci_vt8251 */
{
.sht = &ahci_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
ATA_FLAG_SKIP_D2H_BSY |
AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
.port_ops = &ahci_ops,
@ -258,6 +286,7 @@ static const struct ata_port_info ahci_port_info[] = {
};
static const struct pci_device_id ahci_pci_tbl[] = {
/* Intel */
{ PCI_VENDOR_ID_INTEL, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* ICH6 */
{ PCI_VENDOR_ID_INTEL, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
@ -288,14 +317,39 @@ static const struct pci_device_id ahci_pci_tbl[] = {
board_ahci }, /* ICH8M */
{ PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* ICH8M */
/* JMicron */
{ 0x197b, 0x2360, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* JMicron JMB360 */
{ 0x197b, 0x2361, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* JMicron JMB361 */
{ 0x197b, 0x2363, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* JMicron JMB363 */
{ 0x197b, 0x2365, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* JMicron JMB365 */
{ 0x197b, 0x2366, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* JMicron JMB366 */
/* ATI */
{ PCI_VENDOR_ID_ATI, 0x4380, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* ATI SB600 non-raid */
{ PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* ATI SB600 raid */
/* VIA */
{ PCI_VENDOR_ID_VIA, 0x3349, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci_vt8251 }, /* VIA VT8251 */
/* NVIDIA */
{ PCI_VENDOR_ID_NVIDIA, 0x044c, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* MCP65 */
{ PCI_VENDOR_ID_NVIDIA, 0x044d, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* MCP65 */
{ PCI_VENDOR_ID_NVIDIA, 0x044e, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* MCP65 */
{ PCI_VENDOR_ID_NVIDIA, 0x044f, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* MCP65 */
{ } /* terminate list */
};
@ -374,8 +428,6 @@ static int ahci_port_start(struct ata_port *ap)
pp->cmd_tbl = mem;
pp->cmd_tbl_dma = mem_dma;
pp->cmd_tbl_sg = mem + AHCI_CMD_TBL_HDR;
ap->private_data = pp;
if (hpriv->cap & HOST_CAP_64)
@ -508,46 +560,71 @@ static unsigned int ahci_dev_classify(struct ata_port *ap)
return ata_dev_classify(&tf);
}
static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, u32 opts)
static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
u32 opts)
{
pp->cmd_slot[0].opts = cpu_to_le32(opts);
pp->cmd_slot[0].status = 0;
pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);
dma_addr_t cmd_tbl_dma;
cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
pp->cmd_slot[tag].opts = cpu_to_le32(opts);
pp->cmd_slot[tag].status = 0;
pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
}
static int ahci_poll_register(void __iomem *reg, u32 mask, u32 val,
unsigned long interval_msec,
unsigned long timeout_msec)
static int ahci_clo(struct ata_port *ap)
{
unsigned long timeout;
void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
struct ahci_host_priv *hpriv = ap->host_set->private_data;
u32 tmp;
timeout = jiffies + (timeout_msec * HZ) / 1000;
do {
tmp = readl(reg);
if ((tmp & mask) == val)
return 0;
msleep(interval_msec);
} while (time_before(jiffies, timeout));
if (!(hpriv->cap & HOST_CAP_CLO))
return -EOPNOTSUPP;
return -1;
tmp = readl(port_mmio + PORT_CMD);
tmp |= PORT_CMD_CLO;
writel(tmp, port_mmio + PORT_CMD);
tmp = ata_wait_register(port_mmio + PORT_CMD,
PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
if (tmp & PORT_CMD_CLO)
return -EIO;
return 0;
}
static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
static int ahci_prereset(struct ata_port *ap)
{
if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
(ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
/* ATA_BUSY hasn't cleared, so send a CLO */
ahci_clo(ap);
}
return ata_std_prereset(ap);
}
static int ahci_softreset(struct ata_port *ap, unsigned int *class)
{
struct ahci_host_priv *hpriv = ap->host_set->private_data;
struct ahci_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
const u32 cmd_fis_len = 5; /* five dwords */
const char *reason = NULL;
struct ata_taskfile tf;
u32 tmp;
u8 *fis;
int rc;
DPRINTK("ENTER\n");
if (ata_port_offline(ap)) {
DPRINTK("PHY reports no device\n");
*class = ATA_DEV_NONE;
return 0;
}
/* prepare for SRST (AHCI-1.1 10.4.1) */
rc = ahci_stop_engine(ap);
if (rc) {
@ -558,23 +635,13 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
/* check BUSY/DRQ, perform Command List Override if necessary */
ahci_tf_read(ap, &tf);
if (tf.command & (ATA_BUSY | ATA_DRQ)) {
u32 tmp;
rc = ahci_clo(ap);
if (!(hpriv->cap & HOST_CAP_CLO)) {
rc = -EIO;
reason = "port busy but no CLO";
if (rc == -EOPNOTSUPP) {
reason = "port busy but CLO unavailable";
goto fail_restart;
}
tmp = readl(port_mmio + PORT_CMD);
tmp |= PORT_CMD_CLO;
writel(tmp, port_mmio + PORT_CMD);
readl(port_mmio + PORT_CMD); /* flush */
if (ahci_poll_register(port_mmio + PORT_CMD, PORT_CMD_CLO, 0x0,
1, 500)) {
rc = -EIO;
reason = "CLO failed";
} else if (rc) {
reason = "port busy but CLO failed";
goto fail_restart;
}
}
@ -582,20 +649,21 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
/* restart engine */
ahci_start_engine(ap);
ata_tf_init(ap, &tf, 0);
ata_tf_init(ap->device, &tf);
fis = pp->cmd_tbl;
/* issue the first D2H Register FIS */
ahci_fill_cmd_slot(pp, cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
ahci_fill_cmd_slot(pp, 0,
cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
tf.ctl |= ATA_SRST;
ata_tf_to_fis(&tf, fis, 0);
fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
writel(1, port_mmio + PORT_CMD_ISSUE);
readl(port_mmio + PORT_CMD_ISSUE); /* flush */
if (ahci_poll_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x0, 1, 500)) {
tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
if (tmp & 0x1) {
rc = -EIO;
reason = "1st FIS failed";
goto fail;
@ -605,7 +673,7 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
msleep(1);
/* issue the second D2H Register FIS */
ahci_fill_cmd_slot(pp, cmd_fis_len);
ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
tf.ctl &= ~ATA_SRST;
ata_tf_to_fis(&tf, fis, 0);
@ -625,7 +693,7 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
msleep(150);
*class = ATA_DEV_NONE;
if (sata_dev_present(ap)) {
if (ata_port_online(ap)) {
if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
rc = -EIO;
reason = "device not ready";
@ -640,25 +708,31 @@ static int ahci_softreset(struct ata_port *ap, int verbose, unsigned int *class)
fail_restart:
ahci_start_engine(ap);
fail:
if (verbose)
printk(KERN_ERR "ata%u: softreset failed (%s)\n",
ap->id, reason);
else
DPRINTK("EXIT, rc=%d reason=\"%s\"\n", rc, reason);
ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
return rc;
}
static int ahci_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
{
struct ahci_port_priv *pp = ap->private_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
struct ata_taskfile tf;
int rc;
DPRINTK("ENTER\n");
ahci_stop_engine(ap);
rc = sata_std_hardreset(ap, verbose, class);
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(ap->device, &tf);
tf.command = 0xff;
ata_tf_to_fis(&tf, d2h_fis, 0);
rc = sata_std_hardreset(ap, class);
ahci_start_engine(ap);
if (rc == 0)
if (rc == 0 && ata_port_online(ap))
*class = ahci_dev_classify(ap);
if (*class == ATA_DEV_UNKNOWN)
*class = ATA_DEV_NONE;
@ -686,13 +760,6 @@ static void ahci_postreset(struct ata_port *ap, unsigned int *class)
}
}
static int ahci_probe_reset(struct ata_port *ap, unsigned int *classes)
{
return ata_drive_probe_reset(ap, ata_std_probeinit,
ahci_softreset, ahci_hardreset,
ahci_postreset, classes);
}
static u8 ahci_check_status(struct ata_port *ap)
{
void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
@ -708,9 +775,8 @@ static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
ata_tf_from_fis(d2h_fis, tf);
}
static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc)
static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
{
struct ahci_port_priv *pp = qc->ap->private_data;
struct scatterlist *sg;
struct ahci_sg *ahci_sg;
unsigned int n_sg = 0;
@ -720,7 +786,7 @@ static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc)
/*
* Next, the S/G list.
*/
ahci_sg = pp->cmd_tbl_sg;
ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
ata_for_each_sg(sg, qc) {
dma_addr_t addr = sg_dma_address(sg);
u32 sg_len = sg_dma_len(sg);
@ -741,6 +807,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct ahci_port_priv *pp = ap->private_data;
int is_atapi = is_atapi_taskfile(&qc->tf);
void *cmd_tbl;
u32 opts;
const u32 cmd_fis_len = 5; /* five dwords */
unsigned int n_elem;
@ -749,16 +816,17 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
* Fill in command table information. First, the header,
* a SATA Register - Host to Device command FIS.
*/
ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0);
cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
if (is_atapi) {
memset(pp->cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb,
qc->dev->cdb_len);
memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
}
n_elem = 0;
if (qc->flags & ATA_QCFLAG_DMAMAP)
n_elem = ahci_fill_sg(qc);
n_elem = ahci_fill_sg(qc, cmd_tbl);
/*
* Fill in command slot information.
@ -769,112 +837,122 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
if (is_atapi)
opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
ahci_fill_cmd_slot(pp, opts);
ahci_fill_cmd_slot(pp, qc->tag, opts);
}
static void ahci_restart_port(struct ata_port *ap, u32 irq_stat)
static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
u32 tmp;
struct ahci_port_priv *pp = ap->private_data;
struct ata_eh_info *ehi = &ap->eh_info;
unsigned int err_mask = 0, action = 0;
struct ata_queued_cmd *qc;
u32 serror;
if ((ap->device[0].class != ATA_DEV_ATAPI) ||
((irq_stat & PORT_IRQ_TF_ERR) == 0))
printk(KERN_WARNING "ata%u: port reset, "
"p_is %x is %x pis %x cmd %x tf %x ss %x se %x\n",
ap->id,
irq_stat,
readl(mmio + HOST_IRQ_STAT),
readl(port_mmio + PORT_IRQ_STAT),
readl(port_mmio + PORT_CMD),
readl(port_mmio + PORT_TFDATA),
readl(port_mmio + PORT_SCR_STAT),
readl(port_mmio + PORT_SCR_ERR));
ata_ehi_clear_desc(ehi);
/* stop DMA */
ahci_stop_engine(ap);
/* AHCI needs SError cleared; otherwise, it might lock up */
serror = ahci_scr_read(ap, SCR_ERROR);
ahci_scr_write(ap, SCR_ERROR, serror);
/* clear SATA phy error, if any */
tmp = readl(port_mmio + PORT_SCR_ERR);
writel(tmp, port_mmio + PORT_SCR_ERR);
/* analyze @irq_stat */
ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
/* if DRQ/BSY is set, device needs to be reset.
* if so, issue COMRESET
*/
tmp = readl(port_mmio + PORT_TFDATA);
if (tmp & (ATA_BUSY | ATA_DRQ)) {
writel(0x301, port_mmio + PORT_SCR_CTL);
readl(port_mmio + PORT_SCR_CTL); /* flush */
udelay(10);
writel(0x300, port_mmio + PORT_SCR_CTL);
readl(port_mmio + PORT_SCR_CTL); /* flush */
if (irq_stat & PORT_IRQ_TF_ERR)
err_mask |= AC_ERR_DEV;
if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
err_mask |= AC_ERR_HOST_BUS;
action |= ATA_EH_SOFTRESET;
}
/* re-start DMA */
ahci_start_engine(ap);
}
if (irq_stat & PORT_IRQ_IF_ERR) {
err_mask |= AC_ERR_ATA_BUS;
action |= ATA_EH_SOFTRESET;
ata_ehi_push_desc(ehi, ", interface fatal error");
}
static void ahci_eng_timeout(struct ata_port *ap)
{
struct ata_host_set *host_set = ap->host_set;
void __iomem *mmio = host_set->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
struct ata_queued_cmd *qc;
unsigned long flags;
if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
ata_ehi_hotplugged(ehi);
ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
"connection status changed" : "PHY RDY changed");
}
printk(KERN_WARNING "ata%u: handling error/timeout\n", ap->id);
if (irq_stat & PORT_IRQ_UNK_FIS) {
u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
spin_lock_irqsave(&host_set->lock, flags);
err_mask |= AC_ERR_HSM;
action |= ATA_EH_SOFTRESET;
ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
unk[0], unk[1], unk[2], unk[3]);
}
/* okay, let's hand over to EH */
ehi->serror |= serror;
ehi->action |= action;
ahci_restart_port(ap, readl(port_mmio + PORT_IRQ_STAT));
qc = ata_qc_from_tag(ap, ap->active_tag);
qc->err_mask |= AC_ERR_TIMEOUT;
if (qc)
qc->err_mask |= err_mask;
else
ehi->err_mask |= err_mask;
spin_unlock_irqrestore(&host_set->lock, flags);
ata_eh_qc_complete(qc);
if (irq_stat & PORT_IRQ_FREEZE)
ata_port_freeze(ap);
else
ata_port_abort(ap);
}
static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
static void ahci_host_intr(struct ata_port *ap)
{
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
u32 status, serr, ci;
serr = readl(port_mmio + PORT_SCR_ERR);
writel(serr, port_mmio + PORT_SCR_ERR);
struct ata_eh_info *ehi = &ap->eh_info;
u32 status, qc_active;
int rc;
status = readl(port_mmio + PORT_IRQ_STAT);
writel(status, port_mmio + PORT_IRQ_STAT);
ci = readl(port_mmio + PORT_CMD_ISSUE);
if (likely((ci & 0x1) == 0)) {
if (qc) {
WARN_ON(qc->err_mask);
ata_qc_complete(qc);
qc = NULL;
}
if (unlikely(status & PORT_IRQ_ERROR)) {
ahci_error_intr(ap, status);
return;
}
if (status & PORT_IRQ_FATAL) {
unsigned int err_mask;
if (status & PORT_IRQ_TF_ERR)
err_mask = AC_ERR_DEV;
else if (status & PORT_IRQ_IF_ERR)
err_mask = AC_ERR_ATA_BUS;
else
err_mask = AC_ERR_HOST_BUS;
if (ap->sactive)
qc_active = readl(port_mmio + PORT_SCR_ACT);
else
qc_active = readl(port_mmio + PORT_CMD_ISSUE);
/* command processing has stopped due to error; restart */
ahci_restart_port(ap, status);
if (qc) {
qc->err_mask |= err_mask;
ata_qc_complete(qc);
}
rc = ata_qc_complete_multiple(ap, qc_active, NULL);
if (rc > 0)
return;
if (rc < 0) {
ehi->err_mask |= AC_ERR_HSM;
ehi->action |= ATA_EH_SOFTRESET;
ata_port_freeze(ap);
return;
}
return 1;
/* hmmm... a spurious interrupt */
/* some devices send D2H reg with I bit set during NCQ command phase */
if (ap->sactive && status & PORT_IRQ_D2H_REG_FIS)
return;
/* ignore interim PIO setup fis interrupts */
if (ata_tag_valid(ap->active_tag)) {
struct ata_queued_cmd *qc =
ata_qc_from_tag(ap, ap->active_tag);
if (qc && qc->tf.protocol == ATA_PROT_PIO &&
(status & PORT_IRQ_PIOS_FIS))
return;
}
if (ata_ratelimit())
ata_port_printk(ap, KERN_INFO, "spurious interrupt "
"(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
status, ap->active_tag, ap->sactive);
}
static void ahci_irq_clear(struct ata_port *ap)
@ -882,7 +960,7 @@ static void ahci_irq_clear(struct ata_port *ap)
/* TODO */
}
static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ahci_host_priv *hpriv;
@ -911,14 +989,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
ap = host_set->ports[i];
if (ap) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
if (!ahci_host_intr(ap, qc))
if (ata_ratelimit())
dev_printk(KERN_WARNING, host_set->dev,
"unhandled interrupt on port %u\n",
i);
ahci_host_intr(ap);
VPRINTK("port %u\n", i);
} else {
VPRINTK("port %u (no irq)\n", i);
@ -935,7 +1006,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
handled = 1;
}
spin_unlock(&host_set->lock);
spin_unlock(&host_set->lock);
VPRINTK("EXIT\n");
@ -947,12 +1018,65 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
writel(1, port_mmio + PORT_CMD_ISSUE);
if (qc->tf.protocol == ATA_PROT_NCQ)
writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
readl(port_mmio + PORT_CMD_ISSUE); /* flush */
return 0;
}
static void ahci_freeze(struct ata_port *ap)
{
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
/* turn IRQ off */
writel(0, port_mmio + PORT_IRQ_MASK);
}
static void ahci_thaw(struct ata_port *ap)
{
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
u32 tmp;
/* clear IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
writel(tmp, port_mmio + PORT_IRQ_STAT);
writel(1 << ap->id, mmio + HOST_IRQ_STAT);
/* turn IRQ back on */
writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
}
static void ahci_error_handler(struct ata_port *ap)
{
if (!(ap->flags & ATA_FLAG_FROZEN)) {
/* restart engine */
ahci_stop_engine(ap);
ahci_start_engine(ap);
}
/* perform recovery */
ata_do_eh(ap, ahci_prereset, ahci_softreset, ahci_hardreset,
ahci_postreset);
}
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
if (qc->flags & ATA_QCFLAG_FAILED)
qc->err_mask |= AC_ERR_OTHER;
if (qc->err_mask) {
/* make DMA engine forget about the failed command */
ahci_stop_engine(ap);
ahci_start_engine(ap);
}
}
static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
unsigned int port_idx)
{
@ -1097,9 +1221,6 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
writel(tmp, port_mmio + PORT_IRQ_STAT);
writel(1 << i, mmio + HOST_IRQ_STAT);
/* set irq mask (enables interrupts) */
writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
}
tmp = readl(mmio + HOST_CTL);
@ -1197,6 +1318,8 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
VPRINTK("ENTER\n");
WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@ -1264,6 +1387,10 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto err_out_hpriv;
if (!(probe_ent->host_flags & AHCI_FLAG_NO_NCQ) &&
(hpriv->cap & HOST_CAP_NCQ))
probe_ent->host_flags |= ATA_FLAG_NCQ;
ahci_print_info(probe_ent);
/* FIXME: check ata_device_add return value */
@ -1295,21 +1422,17 @@ static void ahci_remove_one (struct pci_dev *pdev)
struct device *dev = pci_dev_to_dev(pdev);
struct ata_host_set *host_set = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host_set->private_data;
struct ata_port *ap;
unsigned int i;
int have_msi;
for (i = 0; i < host_set->n_ports; i++) {
ap = host_set->ports[i];
scsi_remove_host(ap->host);
}
for (i = 0; i < host_set->n_ports; i++)
ata_port_detach(host_set->ports[i]);
have_msi = hpriv->flags & AHCI_FLAG_MSI;
free_irq(host_set->irq, host_set);
for (i = 0; i < host_set->n_ports; i++) {
ap = host_set->ports[i];
struct ata_port *ap = host_set->ports[i];
ata_scsi_release(ap->host);
scsi_host_put(ap->host);


@ -93,7 +93,7 @@
#include <linux/libata.h>
#define DRV_NAME "ata_piix"
#define DRV_VERSION "1.05"
#define DRV_VERSION "1.10"
enum {
PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
@ -146,11 +146,10 @@ struct piix_map_db {
static int piix_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent);
static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes);
static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes);
static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
static void piix_pata_error_handler(struct ata_port *ap);
static void piix_sata_error_handler(struct ata_port *ap);
static unsigned int in_module_init = 1;
@ -159,6 +158,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix4_pata },
{ 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
{ 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
{ 0x8086, 0x27df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
#endif
/* NOTE: The following PCI ids must be kept in sync with the
@ -218,6 +218,7 @@ static struct scsi_host_template piix_sht = {
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
.resume = ata_scsi_device_resume,
.suspend = ata_scsi_device_suspend,
@ -227,6 +228,7 @@ static const struct ata_port_operations piix_pata_ops = {
.port_disable = ata_port_disable,
.set_piomode = piix_set_piomode,
.set_dmamode = piix_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
@ -234,16 +236,18 @@ static const struct ata_port_operations piix_pata_ops = {
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.probe_reset = piix_pata_probe_reset,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.eng_timeout = ata_eng_timeout,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = piix_pata_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -262,16 +266,18 @@ static const struct ata_port_operations piix_sata_ops = {
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.probe_reset = piix_sata_probe_reset,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.eng_timeout = ata_eng_timeout,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = piix_sata_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -455,59 +461,51 @@ cbl40:
}
/**
* piix_pata_probeinit - probeinit for PATA host controller
* piix_pata_prereset - prereset for PATA host controller
* @ap: Target port
*
* Probeinit including cable detection.
* Prereset including cable detection.
*
* LOCKING:
* None (inherited from caller).
*/
static void piix_pata_probeinit(struct ata_port *ap)
{
piix_pata_cbl_detect(ap);
ata_std_probeinit(ap);
}
/**
* piix_pata_probe_reset - Perform reset on PATA port and classify
* @ap: Port to reset
* @classes: Resulting classes of attached devices
*
* Reset PATA phy and classify attached devices.
*
* LOCKING:
* None (inherited from caller).
*/
static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes)
static int piix_pata_prereset(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) {
printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
return 0;
}
return ata_drive_probe_reset(ap, piix_pata_probeinit,
ata_std_softreset, NULL,
ata_std_postreset, classes);
piix_pata_cbl_detect(ap);
return ata_std_prereset(ap);
}
static void piix_pata_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, piix_pata_prereset, ata_std_softreset, NULL,
ata_std_postreset);
}
/**
* piix_sata_probe - Probe PCI device for present SATA devices
* @ap: Port associated with the PCI device we wish to probe
* piix_sata_prereset - prereset for SATA host controller
* @ap: Target port
*
* Reads and configures SATA PCI device's PCI config register
* Port Configuration and Status (PCS) to determine port and
* device availability.
* device availability. Return -ENODEV to skip reset if no
* device is present.
*
* LOCKING:
* None (inherited from caller).
*
* RETURNS:
* Mask of avaliable devices on the port.
* 0 if device is present, -ENODEV otherwise.
*/
static unsigned int piix_sata_probe (struct ata_port *ap)
static int piix_sata_prereset(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
const unsigned int *map = ap->host_set->private_data;
@ -549,29 +547,19 @@ static unsigned int piix_sata_probe (struct ata_port *ap)
DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
ap->id, pcs, present_mask);
return present_mask;
}
/**
* piix_sata_probe_reset - Perform reset on SATA port and classify
* @ap: Port to reset
* @classes: Resulting classes of attached devices
*
* Reset SATA phy and classify attached devices.
*
* LOCKING:
* None (inherited from caller).
*/
static int piix_sata_probe_reset(struct ata_port *ap, unsigned int *classes)
{
if (!piix_sata_probe(ap)) {
printk(KERN_INFO "ata%u: SATA port has no device.\n", ap->id);
if (!present_mask) {
ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n");
ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
return 0;
}
return ata_drive_probe_reset(ap, ata_std_probeinit,
ata_std_softreset, NULL,
ata_std_postreset, classes);
return ata_std_prereset(ap);
}
static void piix_sata_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, piix_sata_prereset, ata_std_softreset, NULL,
ata_std_postreset);
}
/**
@ -760,15 +748,15 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
pci_read_config_word(pdev, 0x41, &cfg);
/* Only on the original revision: IDE DMA can hang */
if(rev == 0x00)
if (rev == 0x00)
no_piix_dma = 1;
/* On all revisions below 5 PXB bus lock must be disabled for IDE */
else if(cfg & (1<<14) && rev < 5)
else if (cfg & (1<<14) && rev < 5)
no_piix_dma = 2;
}
if(no_piix_dma)
if (no_piix_dma)
dev_printk(KERN_WARNING, &ata_dev->dev, "450NX errata present, disabling IDE DMA.\n");
if(no_piix_dma == 2)
if (no_piix_dma == 2)
dev_printk(KERN_WARNING, &ata_dev->dev, "A BIOS update may resolve this.\n");
return no_piix_dma;
}


@ -652,6 +652,151 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc)
ata_altstatus(ap); /* dummy read */
}
/**
* ata_bmdma_freeze - Freeze BMDMA controller port
* @ap: port to freeze
*
* Freeze BMDMA controller port.
*
* LOCKING:
* Inherited from caller.
*/
void ata_bmdma_freeze(struct ata_port *ap)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
ap->ctl |= ATA_NIEN;
ap->last_ctl = ap->ctl;
if (ap->flags & ATA_FLAG_MMIO)
writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
else
outb(ap->ctl, ioaddr->ctl_addr);
}
/**
* ata_bmdma_thaw - Thaw BMDMA controller port
* @ap: port to thaw
*
* Thaw BMDMA controller port.
*
* LOCKING:
* Inherited from caller.
*/
void ata_bmdma_thaw(struct ata_port *ap)
{
/* clear & re-enable interrupts */
ata_chk_status(ap);
ap->ops->irq_clear(ap);
if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
ata_irq_on(ap);
}
/**
* ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
* @ap: port to handle error for
* @prereset: prereset method (can be NULL)
* @softreset: softreset method (can be NULL)
* @hardreset: hardreset method (can be NULL)
* @postreset: postreset method (can be NULL)
*
* Handle error for ATA BMDMA controller. It can handle both
* PATA and SATA controllers. Many controllers should be able to
* use this EH as-is or with some added handling before and
* after.
*
* This function is intended to be used for constructing
* ->error_handler callback by low level drivers.
*
* LOCKING:
* Kernel thread context (may sleep)
*/
void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
ata_postreset_fn_t postreset)
{
struct ata_eh_context *ehc = &ap->eh_context;
struct ata_queued_cmd *qc;
unsigned long flags;
int thaw = 0;
qc = __ata_qc_from_tag(ap, ap->active_tag);
if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
qc = NULL;
/* reset PIO HSM and stop DMA engine */
spin_lock_irqsave(ap->lock, flags);
ap->hsm_task_state = HSM_ST_IDLE;
if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
u8 host_stat;
host_stat = ata_bmdma_status(ap);
ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);
/* BMDMA controllers indicate host bus error by
* setting DMA_ERR bit and timing out. As it wasn't
* really a timeout event, adjust error mask and
* cancel frozen state.
*/
if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) {
qc->err_mask = AC_ERR_HOST_BUS;
thaw = 1;
}
ap->ops->bmdma_stop(qc);
}
ata_altstatus(ap);
ata_chk_status(ap);
ap->ops->irq_clear(ap);
spin_unlock_irqrestore(ap->lock, flags);
if (thaw)
ata_eh_thaw_port(ap);
/* PIO and DMA engines have been stopped, perform recovery */
ata_do_eh(ap, prereset, softreset, hardreset, postreset);
}
/**
* ata_bmdma_error_handler - Stock error handler for BMDMA controller
* @ap: port to handle error for
*
* Stock error handler for BMDMA controller.
*
* LOCKING:
* Kernel thread context (may sleep)
*/
void ata_bmdma_error_handler(struct ata_port *ap)
{
ata_reset_fn_t hardreset;
hardreset = NULL;
if (sata_scr_valid(ap))
hardreset = sata_std_hardreset;
ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
ata_std_postreset);
}
/**
* ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
* BMDMA controller
* @qc: internal command to clean up
*
* LOCKING:
* Kernel thread context (may sleep)
*/
void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
{
ata_bmdma_stop(qc);
}
#ifdef CONFIG_PCI
static struct ata_probe_ent *
ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
@ -930,10 +1075,21 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
/* FIXME: check ata_device_add return */
if (legacy_mode) {
if (legacy_mode & (1 << 0))
struct device *dev = &pdev->dev;
struct ata_host_set *host_set = NULL;
if (legacy_mode & (1 << 0)) {
ata_device_add(probe_ent);
if (legacy_mode & (1 << 1))
host_set = dev_get_drvdata(dev);
}
if (legacy_mode & (1 << 1)) {
ata_device_add(probe_ent2);
if (host_set) {
host_set->next = dev_get_drvdata(dev);
dev_set_drvdata(dev, host_set);
}
}
} else
ata_device_add(probe_ent);

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -29,10 +29,9 @@
#define __LIBATA_H__
#define DRV_NAME "libata"
#define DRV_VERSION "1.20" /* must be exactly four chars */
#define DRV_VERSION "1.30" /* must be exactly four chars */
struct ata_scsi_args {
struct ata_port *ap;
struct ata_device *dev;
u16 *id;
struct scsi_cmnd *cmd;
@ -40,18 +39,32 @@ struct ata_scsi_args {
};
/* libata-core.c */
extern struct workqueue_struct *ata_aux_wq;
extern int atapi_enabled;
extern int atapi_dmadir;
extern int libata_fua;
extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
struct ata_device *dev);
extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
extern void ata_dev_disable(struct ata_device *dev);
extern void ata_port_flush_task(struct ata_port *ap);
extern unsigned ata_exec_internal(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, void *buf, unsigned int buflen);
extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
int post_reset, u16 *id);
extern int ata_dev_configure(struct ata_device *dev, int print_info);
extern int sata_down_spd_limit(struct ata_port *ap);
extern int sata_set_spd_needed(struct ata_port *ap);
extern int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0);
extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
extern void ata_qc_free(struct ata_queued_cmd *qc);
extern void ata_qc_issue(struct ata_queued_cmd *qc);
extern void __ata_qc_complete(struct ata_queued_cmd *qc);
extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
extern void ata_dev_select(struct ata_port *ap, unsigned int device,
unsigned int wait, unsigned int can_sleep);
extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
extern void ata_dev_init(struct ata_device *dev);
extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
@ -60,6 +73,8 @@ extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
extern struct scsi_transport_template ata_scsi_transport_template;
extern void ata_scsi_scan_host(struct ata_port *ap);
extern int ata_scsi_offline_dev(struct ata_device *dev);
extern void ata_scsi_hotplug(void *data);
extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen);
@ -88,5 +103,13 @@ extern void ata_scsi_set_sense(struct scsi_cmnd *cmd,
extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
unsigned int (*actor) (struct ata_scsi_args *args,
u8 *rbuf, unsigned int buflen));
extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
extern void ata_scsi_dev_rescan(void *data);
/* libata-eh.c */
extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
extern void ata_scsi_error(struct Scsi_Host *host);
extern void ata_port_wait_eh(struct ata_port *ap);
extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
#endif /* __LIBATA_H__ */


@ -46,7 +46,7 @@
#include <linux/libata.h>
#define DRV_NAME "pdc_adma"
#define DRV_VERSION "0.03"
#define DRV_VERSION "0.04"
/* macro to calculate base address for ATA regs */
#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40))
@ -152,6 +152,7 @@ static struct scsi_host_template adma_ata_sht = {
.proc_name = DRV_NAME,
.dma_boundary = ADMA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
};
@ -167,6 +168,7 @@ static const struct ata_port_operations adma_ata_ops = {
.qc_prep = adma_qc_prep,
.qc_issue = adma_qc_issue,
.eng_timeout = adma_eng_timeout,
.data_xfer = ata_mmio_data_xfer,
.irq_handler = adma_intr,
.irq_clear = adma_irq_clear,
.port_start = adma_port_start,
@ -455,13 +457,13 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
continue;
handled = 1;
adma_enter_reg_mode(ap);
if (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))
if (ap->flags & ATA_FLAG_DISABLED)
continue;
pp = ap->private_data;
if (!pp || pp->state != adma_state_pkt)
continue;
qc = ata_qc_from_tag(ap, ap->active_tag);
if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
if ((status & (aPERR | aPSD | aUIRQ)))
qc->err_mask |= AC_ERR_OTHER;
else if (pp->pkt[0] != cDONE)
@ -480,13 +482,13 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
struct ata_port *ap;
ap = host_set->ports[port_no];
if (ap && (!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))) {
if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
struct ata_queued_cmd *qc;
struct adma_port_priv *pp = ap->private_data;
if (!pp || pp->state != adma_state_mmio)
continue;
qc = ata_qc_from_tag(ap, ap->active_tag);
if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
/* check main status, clearing INTRQ */
u8 status = ata_check_status(ap);


@ -93,7 +93,7 @@ enum {
MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
ATA_FLAG_NO_ATAPI),
ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
CRQB_FLAG_READ = (1 << 0),
@ -272,33 +272,33 @@ enum chip_type {
/* Command ReQuest Block: 32B */
struct mv_crqb {
u32 sg_addr;
u32 sg_addr_hi;
u16 ctrl_flags;
u16 ata_cmd[11];
__le32 sg_addr;
__le32 sg_addr_hi;
__le16 ctrl_flags;
__le16 ata_cmd[11];
};
struct mv_crqb_iie {
u32 addr;
u32 addr_hi;
u32 flags;
u32 len;
u32 ata_cmd[4];
__le32 addr;
__le32 addr_hi;
__le32 flags;
__le32 len;
__le32 ata_cmd[4];
};
/* Command ResPonse Block: 8B */
struct mv_crpb {
u16 id;
u16 flags;
u32 tmstmp;
__le16 id;
__le16 flags;
__le32 tmstmp;
};
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
u32 addr;
u32 flags_size;
u32 addr_hi;
u32 reserved;
__le32 addr;
__le32 flags_size;
__le32 addr_hi;
__le32 reserved;
};
struct mv_port_priv {
@ -390,6 +390,7 @@ static struct scsi_host_template mv_sht = {
.proc_name = DRV_NAME,
.dma_boundary = MV_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
};
@ -406,6 +407,7 @@ static const struct ata_port_operations mv5_ops = {
.qc_prep = mv_qc_prep,
.qc_issue = mv_qc_issue,
.data_xfer = ata_mmio_data_xfer,
.eng_timeout = mv_eng_timeout,
@ -433,6 +435,7 @@ static const struct ata_port_operations mv6_ops = {
.qc_prep = mv_qc_prep,
.qc_issue = mv_qc_issue,
.data_xfer = ata_mmio_data_xfer,
.eng_timeout = mv_eng_timeout,
@ -683,7 +686,7 @@ static void mv_stop_dma(struct ata_port *ap)
}
if (EDMA_EN & reg) {
printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id);
ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
/* FIXME: Consider doing a reset here to recover */
}
}
@ -1028,7 +1031,7 @@ static inline unsigned mv_inc_q_index(unsigned index)
return (index + 1) & MV_MAX_Q_DEPTH_MASK;
}
static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
(last ? CRQB_CMD_LAST : 0);
@ -1051,7 +1054,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct mv_port_priv *pp = ap->private_data;
u16 *cw;
__le16 *cw;
struct ata_taskfile *tf;
u16 flags = 0;
unsigned in_index;
@ -1307,8 +1310,8 @@ static void mv_err_intr(struct ata_port *ap, int reset_allowed)
edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
if (EDMA_ERR_SERR & edma_err_cause) {
serr = scr_read(ap, SCR_ERROR);
scr_write_flush(ap, SCR_ERROR, serr);
sata_scr_read(ap, SCR_ERROR, &serr);
sata_scr_write_flush(ap, SCR_ERROR, serr);
}
if (EDMA_ERR_SELF_DIS & edma_err_cause) {
struct mv_port_priv *pp = ap->private_data;
@ -1377,7 +1380,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
/* Note that DEV_IRQ might happen spuriously during EDMA,
* and should be ignored in such cases.
* The cause of this is still under investigation.
*/
*/
if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
/* EDMA: check for response queue interrupt */
if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
@ -1398,7 +1401,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
}
}
if (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))
if (ap && (ap->flags & ATA_FLAG_DISABLED))
continue;
err_mask = ac_err_mask(ata_status);
@ -1419,7 +1422,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
VPRINTK("port %u IRQ found for qc, "
"ata_status 0x%x\n", port,ata_status);
/* mark qc status appropriately */
if (!(qc->tf.ctl & ATA_NIEN)) {
if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
qc->err_mask |= err_mask;
ata_qc_complete(qc);
}
@ -1949,15 +1952,16 @@ static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
/* Issue COMRESET via SControl */
comreset_retry:
scr_write_flush(ap, SCR_CONTROL, 0x301);
sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
__msleep(1, can_sleep);
scr_write_flush(ap, SCR_CONTROL, 0x300);
sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
__msleep(20, can_sleep);
timeout = jiffies + msecs_to_jiffies(200);
do {
sstatus = scr_read(ap, SCR_STATUS) & 0x3;
sata_scr_read(ap, SCR_STATUS, &sstatus);
sstatus &= 0x3;
if ((sstatus == 3) || (sstatus == 0))
break;
@ -1974,11 +1978,12 @@ comreset_retry:
"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
if (sata_dev_present(ap)) {
if (ata_port_online(ap)) {
ata_port_probe(ap);
} else {
printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
ap->id, scr_read(ap, SCR_STATUS));
sata_scr_read(ap, SCR_STATUS, &sstatus);
ata_port_printk(ap, KERN_INFO,
"no device found (phy stat %08x)\n", sstatus);
ata_port_disable(ap);
return;
}
@ -2005,7 +2010,7 @@ comreset_retry:
tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);
dev->class = ata_dev_classify(&tf);
if (!ata_dev_present(dev)) {
if (!ata_dev_enabled(dev)) {
VPRINTK("Port disabled post-sig: No device present.\n");
ata_port_disable(ap);
}
@ -2037,7 +2042,7 @@ static void mv_eng_timeout(struct ata_port *ap)
struct ata_queued_cmd *qc;
unsigned long flags;
printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
DPRINTK("All regs @ start of eng_timeout\n");
mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
to_pci_dev(ap->host_set->dev));


@ -44,7 +44,7 @@
#include <linux/libata.h>
#define DRV_NAME "sata_nv"
#define DRV_VERSION "0.8"
#define DRV_VERSION "0.9"
enum {
NV_PORTS = 2,
@ -54,40 +54,25 @@ enum {
NV_PORT0_SCR_REG_OFFSET = 0x00,
NV_PORT1_SCR_REG_OFFSET = 0x40,
/* INT_STATUS/ENABLE */
NV_INT_STATUS = 0x10,
NV_INT_STATUS_CK804 = 0x440,
NV_INT_STATUS_PDEV_INT = 0x01,
NV_INT_STATUS_PDEV_PM = 0x02,
NV_INT_STATUS_PDEV_ADDED = 0x04,
NV_INT_STATUS_PDEV_REMOVED = 0x08,
NV_INT_STATUS_SDEV_INT = 0x10,
NV_INT_STATUS_SDEV_PM = 0x20,
NV_INT_STATUS_SDEV_ADDED = 0x40,
NV_INT_STATUS_SDEV_REMOVED = 0x80,
NV_INT_STATUS_PDEV_HOTPLUG = (NV_INT_STATUS_PDEV_ADDED |
NV_INT_STATUS_PDEV_REMOVED),
NV_INT_STATUS_SDEV_HOTPLUG = (NV_INT_STATUS_SDEV_ADDED |
NV_INT_STATUS_SDEV_REMOVED),
NV_INT_STATUS_HOTPLUG = (NV_INT_STATUS_PDEV_HOTPLUG |
NV_INT_STATUS_SDEV_HOTPLUG),
NV_INT_ENABLE = 0x11,
NV_INT_STATUS_CK804 = 0x440,
NV_INT_ENABLE_CK804 = 0x441,
NV_INT_ENABLE_PDEV_MASK = 0x01,
NV_INT_ENABLE_PDEV_PM = 0x02,
NV_INT_ENABLE_PDEV_ADDED = 0x04,
NV_INT_ENABLE_PDEV_REMOVED = 0x08,
NV_INT_ENABLE_SDEV_MASK = 0x10,
NV_INT_ENABLE_SDEV_PM = 0x20,
NV_INT_ENABLE_SDEV_ADDED = 0x40,
NV_INT_ENABLE_SDEV_REMOVED = 0x80,
NV_INT_ENABLE_PDEV_HOTPLUG = (NV_INT_ENABLE_PDEV_ADDED |
NV_INT_ENABLE_PDEV_REMOVED),
NV_INT_ENABLE_SDEV_HOTPLUG = (NV_INT_ENABLE_SDEV_ADDED |
NV_INT_ENABLE_SDEV_REMOVED),
NV_INT_ENABLE_HOTPLUG = (NV_INT_ENABLE_PDEV_HOTPLUG |
NV_INT_ENABLE_SDEV_HOTPLUG),
/* INT_STATUS/ENABLE bits */
NV_INT_DEV = 0x01,
NV_INT_PM = 0x02,
NV_INT_ADDED = 0x04,
NV_INT_REMOVED = 0x08,
NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
NV_INT_ALL = 0x0f,
NV_INT_MASK = NV_INT_DEV |
NV_INT_ADDED | NV_INT_REMOVED,
/* INT_CONFIG */
NV_INT_CONFIG = 0x12,
NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
@ -97,23 +82,27 @@ enum {
};
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static irqreturn_t nv_interrupt (int irq, void *dev_instance,
struct pt_regs *regs);
static void nv_ck804_host_stop(struct ata_host_set *host_set);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
struct pt_regs *regs);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
struct pt_regs *regs);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
struct pt_regs *regs);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static void nv_host_stop (struct ata_host_set *host_set);
static void nv_enable_hotplug(struct ata_probe_ent *probe_ent);
static void nv_disable_hotplug(struct ata_host_set *host_set);
static int nv_check_hotplug(struct ata_host_set *host_set);
static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent);
static void nv_disable_hotplug_ck804(struct ata_host_set *host_set);
static int nv_check_hotplug_ck804(struct ata_host_set *host_set);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
enum nv_host_type
{
GENERIC,
NFORCE2,
NFORCE3,
NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
CK804
};
@ -140,6 +129,16 @@ static const struct pci_device_id nv_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
{ PCI_VENDOR_ID_NVIDIA, 0x045c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
{ PCI_VENDOR_ID_NVIDIA, 0x045d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
{ PCI_VENDOR_ID_NVIDIA, 0x045e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
{ PCI_VENDOR_ID_NVIDIA, 0x045f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
@ -149,46 +148,6 @@ static const struct pci_device_id nv_pci_tbl[] = {
{ 0, } /* terminate list */
};
struct nv_host_desc
{
enum nv_host_type host_type;
void (*enable_hotplug)(struct ata_probe_ent *probe_ent);
void (*disable_hotplug)(struct ata_host_set *host_set);
int (*check_hotplug)(struct ata_host_set *host_set);
};
static struct nv_host_desc nv_device_tbl[] = {
{
.host_type = GENERIC,
.enable_hotplug = NULL,
.disable_hotplug= NULL,
.check_hotplug = NULL,
},
{
.host_type = NFORCE2,
.enable_hotplug = nv_enable_hotplug,
.disable_hotplug= nv_disable_hotplug,
.check_hotplug = nv_check_hotplug,
},
{
.host_type = NFORCE3,
.enable_hotplug = nv_enable_hotplug,
.disable_hotplug= nv_disable_hotplug,
.check_hotplug = nv_check_hotplug,
},
{ .host_type = CK804,
.enable_hotplug = nv_enable_hotplug_ck804,
.disable_hotplug= nv_disable_hotplug_ck804,
.check_hotplug = nv_check_hotplug_ck804,
},
};
struct nv_host
{
struct nv_host_desc *host_desc;
unsigned long host_flags;
};
static struct pci_driver nv_pci_driver = {
.name = DRV_NAME,
.id_table = nv_pci_tbl,
@ -210,51 +169,119 @@ static struct scsi_host_template nv_sht = {
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
};
static const struct ata_port_operations nv_ops = {
static const struct ata_port_operations nv_generic_ops = {
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.exec_command = ata_exec_command,
.check_status = ata_check_status,
.dev_select = ata_std_dev_select,
.phy_reset = sata_phy_reset,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.irq_handler = nv_interrupt,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = nv_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.data_xfer = ata_pio_data_xfer,
.irq_handler = nv_generic_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.scr_read = nv_scr_read,
.scr_write = nv_scr_write,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = nv_host_stop,
.host_stop = ata_pci_host_stop,
};
/* FIXME: The hardware provides the necessary SATA PHY controls
* to support ATA_FLAG_SATA_RESET. However, it is currently
* necessary to disable that flag, to solve misdetection problems.
* See http://bugme.osdl.org/show_bug.cgi?id=3352 for more info.
*
* This problem really needs to be investigated further. But in the
* meantime, we avoid ATA_FLAG_SATA_RESET to get people working.
*/
static struct ata_port_info nv_port_info = {
.sht = &nv_sht,
.host_flags = ATA_FLAG_SATA |
/* ATA_FLAG_SATA_RESET | */
ATA_FLAG_SRST |
ATA_FLAG_NO_LEGACY,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
.port_ops = &nv_ops,
static const struct ata_port_operations nv_nf2_ops = {
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.exec_command = ata_exec_command,
.check_status = ata_check_status,
.dev_select = ata_std_dev_select,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.freeze = nv_nf2_freeze,
.thaw = nv_nf2_thaw,
.error_handler = nv_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.data_xfer = ata_pio_data_xfer,
.irq_handler = nv_nf2_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.scr_read = nv_scr_read,
.scr_write = nv_scr_write,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_pci_host_stop,
};
static const struct ata_port_operations nv_ck804_ops = {
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.exec_command = ata_exec_command,
.check_status = ata_check_status,
.dev_select = ata_std_dev_select,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.freeze = nv_ck804_freeze,
.thaw = nv_ck804_thaw,
.error_handler = nv_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.data_xfer = ata_pio_data_xfer,
.irq_handler = nv_ck804_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.scr_read = nv_scr_read,
.scr_write = nv_scr_write,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = nv_ck804_host_stop,
};
static struct ata_port_info nv_port_info[] = {
/* generic */
{
.sht = &nv_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
.port_ops = &nv_generic_ops,
},
/* nforce2/3 */
{
.sht = &nv_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
.port_ops = &nv_nf2_ops,
},
/* ck804 */
{
.sht = &nv_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
.port_ops = &nv_ck804_ops,
},
};
MODULE_AUTHOR("NVIDIA");
@ -263,11 +290,10 @@ MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
static irqreturn_t nv_interrupt (int irq, void *dev_instance,
struct pt_regs *regs)
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct nv_host *host = host_set->private_data;
unsigned int i;
unsigned int handled = 0;
unsigned long flags;
@ -279,11 +305,11 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
ap = host_set->ports[i];
if (ap &&
!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
!(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
if (qc && (!(qc->tf.ctl & ATA_NIEN)))
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
handled += ata_host_intr(ap, qc);
else
// No request pending? Clear interrupt status
@ -293,14 +319,88 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
}
if (host->host_desc->check_hotplug)
handled += host->host_desc->check_hotplug(host_set);
spin_unlock_irqrestore(&host_set->lock, flags);
return IRQ_RETVAL(handled);
}
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
int handled;
/* freeze if hotplugged */
if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
ata_port_freeze(ap);
return 1;
}
/* bail out if not our interrupt */
if (!(irq_stat & NV_INT_DEV))
return 0;
/* DEV interrupt w/ no active qc? */
if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
ata_check_status(ap);
return 1;
}
/* handle interrupt */
handled = ata_host_intr(ap, qc);
if (unlikely(!handled)) {
/* spurious, clear it */
ata_check_status(ap);
}
return 1;
}
static irqreturn_t nv_do_interrupt(struct ata_host_set *host_set, u8 irq_stat)
{
int i, handled = 0;
for (i = 0; i < host_set->n_ports; i++) {
struct ata_port *ap = host_set->ports[i];
if (ap && !(ap->flags & ATA_FLAG_DISABLED))
handled += nv_host_intr(ap, irq_stat);
irq_stat >>= NV_INT_PORT_SHIFT;
}
return IRQ_RETVAL(handled);
}
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
u8 irq_stat;
irqreturn_t ret;
spin_lock(&host_set->lock);
irq_stat = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
ret = nv_do_interrupt(host_set, irq_stat);
spin_unlock(&host_set->lock);
return ret;
}
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
u8 irq_stat;
irqreturn_t ret;
spin_lock(&host_set->lock);
irq_stat = readb(host_set->mmio_base + NV_INT_STATUS_CK804);
ret = nv_do_interrupt(host_set, irq_stat);
spin_unlock(&host_set->lock);
return ret;
}
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
if (sc_reg > SCR_CONTROL)
@ -317,23 +417,74 @@ static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
}
static void nv_host_stop (struct ata_host_set *host_set)
static void nv_nf2_freeze(struct ata_port *ap)
{
struct nv_host *host = host_set->private_data;
unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr;
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;
// Disable hotplug event interrupts.
if (host->host_desc->disable_hotplug)
host->host_desc->disable_hotplug(host_set);
mask = inb(scr_addr + NV_INT_ENABLE);
mask &= ~(NV_INT_ALL << shift);
outb(mask, scr_addr + NV_INT_ENABLE);
}
kfree(host);
static void nv_nf2_thaw(struct ata_port *ap)
{
unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr;
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;
ata_pci_host_stop(host_set);
outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
mask = inb(scr_addr + NV_INT_ENABLE);
mask |= (NV_INT_MASK << shift);
outb(mask, scr_addr + NV_INT_ENABLE);
}
static void nv_ck804_freeze(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host_set->mmio_base;
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;
mask = readb(mmio_base + NV_INT_ENABLE_CK804);
mask &= ~(NV_INT_ALL << shift);
writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
static void nv_ck804_thaw(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host_set->mmio_base;
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;
writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
mask = readb(mmio_base + NV_INT_ENABLE_CK804);
mask |= (NV_INT_MASK << shift);
writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
unsigned int dummy;
/* SATA hardreset fails to retrieve proper device signature on
* some controllers. Don't classify on hardreset. For more
* info, see http://bugme.osdl.org/show_bug.cgi?id=3352
*/
return sata_std_hardreset(ap, &dummy);
}
static void nv_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
nv_hardreset, ata_std_postreset);
}
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version = 0;
struct nv_host *host;
struct ata_port_info *ppi;
struct ata_probe_ent *probe_ent;
int pci_dev_busy = 0;
@ -370,24 +521,15 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
rc = -ENOMEM;
ppi = &nv_port_info;
ppi = &nv_port_info[ent->driver_data];
probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
if (!probe_ent)
goto err_out_regions;
host = kmalloc(sizeof(struct nv_host), GFP_KERNEL);
if (!host)
goto err_out_free_ent;
memset(host, 0, sizeof(struct nv_host));
host->host_desc = &nv_device_tbl[ent->driver_data];
probe_ent->private_data = host;
probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
if (!probe_ent->mmio_base) {
rc = -EIO;
goto err_out_free_host;
goto err_out_free_ent;
}
base = (unsigned long)probe_ent->mmio_base;
@ -395,24 +537,27 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
/* enable SATA space for CK804 */
if (ent->driver_data == CK804) {
u8 regval;
pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}
pci_set_master(pdev);
rc = ata_device_add(probe_ent);
if (rc != NV_PORTS)
goto err_out_iounmap;
// Enable hotplug event interrupts.
if (host->host_desc->enable_hotplug)
host->host_desc->enable_hotplug(probe_ent);
kfree(probe_ent);
return 0;
err_out_iounmap:
pci_iounmap(pdev, probe_ent->mmio_base);
err_out_free_host:
kfree(host);
err_out_free_ent:
kfree(probe_ent);
err_out_regions:
@ -424,127 +569,17 @@ err_out:
return rc;
}
static void nv_enable_hotplug(struct ata_probe_ent *probe_ent)
{
u8 intr_mask;
outb(NV_INT_STATUS_HOTPLUG,
probe_ent->port[0].scr_addr + NV_INT_STATUS);
intr_mask = inb(probe_ent->port[0].scr_addr + NV_INT_ENABLE);
intr_mask |= NV_INT_ENABLE_HOTPLUG;
outb(intr_mask, probe_ent->port[0].scr_addr + NV_INT_ENABLE);
}
static void nv_disable_hotplug(struct ata_host_set *host_set)
{
u8 intr_mask;
intr_mask = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);
intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
outb(intr_mask, host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);
}
static int nv_check_hotplug(struct ata_host_set *host_set)
{
u8 intr_status;
intr_status = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
// Clear interrupt status.
outb(0xff, host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
if (intr_status & NV_INT_STATUS_HOTPLUG) {
if (intr_status & NV_INT_STATUS_PDEV_ADDED)
printk(KERN_WARNING "nv_sata: "
"Primary device added\n");
if (intr_status & NV_INT_STATUS_PDEV_REMOVED)
printk(KERN_WARNING "nv_sata: "
"Primary device removed\n");
if (intr_status & NV_INT_STATUS_SDEV_ADDED)
printk(KERN_WARNING "nv_sata: "
"Secondary device added\n");
if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
printk(KERN_WARNING "nv_sata: "
"Secondary device removed\n");
return 1;
}
return 0;
}
static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent)
{
struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
u8 intr_mask;
u8 regval;
pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
writeb(NV_INT_STATUS_HOTPLUG, probe_ent->mmio_base + NV_INT_STATUS_CK804);
intr_mask = readb(probe_ent->mmio_base + NV_INT_ENABLE_CK804);
intr_mask |= NV_INT_ENABLE_HOTPLUG;
writeb(intr_mask, probe_ent->mmio_base + NV_INT_ENABLE_CK804);
}
static void nv_disable_hotplug_ck804(struct ata_host_set *host_set)
static void nv_ck804_host_stop(struct ata_host_set *host_set)
{
struct pci_dev *pdev = to_pci_dev(host_set->dev);
u8 intr_mask;
u8 regval;
intr_mask = readb(host_set->mmio_base + NV_INT_ENABLE_CK804);
intr_mask &= ~(NV_INT_ENABLE_HOTPLUG);
writeb(intr_mask, host_set->mmio_base + NV_INT_ENABLE_CK804);
/* disable SATA space for CK804 */
pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}
static int nv_check_hotplug_ck804(struct ata_host_set *host_set)
{
u8 intr_status;
intr_status = readb(host_set->mmio_base + NV_INT_STATUS_CK804);
// Clear interrupt status.
writeb(0xff, host_set->mmio_base + NV_INT_STATUS_CK804);
if (intr_status & NV_INT_STATUS_HOTPLUG) {
if (intr_status & NV_INT_STATUS_PDEV_ADDED)
printk(KERN_WARNING "nv_sata: "
"Primary device added\n");
if (intr_status & NV_INT_STATUS_PDEV_REMOVED)
printk(KERN_WARNING "nv_sata: "
"Primary device removed\n");
if (intr_status & NV_INT_STATUS_SDEV_ADDED)
printk(KERN_WARNING "nv_sata: "
"Secondary device added\n");
if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
printk(KERN_WARNING "nv_sata: "
"Secondary device removed\n");
return 1;
}
return 0;
ata_pci_host_stop(host_set);
}
static int __init nv_init(void)
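The sata_nv changes above are a representative example of the merge's central theme: the old eng_timeout/phy_reset error path is replaced by the new EH hooks (freeze, thaw, error_handler, post_internal_cmd) plus a data_xfer hook. Below is a minimal sketch of that wiring for a bmdma-style controller, using only helpers that appear in this diff; my_hardreset, my_error_handler and my_ops are illustrative names, not code from the patch.

#include <linux/libata.h>

/* Sketch only: new-EH hook wiring modelled on the nv_generic_ops /
 * nv_nf2_ops conversion above.  Controller-specific hooks are omitted. */

static int my_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* Don't classify on hardreset if the controller cannot latch the
	 * device signature (same reasoning as nv_hardreset() above). */
	return sata_std_hardreset(ap, &dummy);
}

static void my_error_handler(struct ata_port *ap)
{
	/* prereset -> softreset -> hardreset -> postreset chain */
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   my_hardreset, ata_std_postreset);
}

static const struct ata_port_operations my_ops = {
	/* ... taskfile, bmdma and qc hooks as before ... */
	.freeze			= ata_bmdma_freeze,	/* mask port IRQs */
	.thaw			= ata_bmdma_thaw,	/* ack and re-enable them */
	.error_handler		= my_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,	/* PIO transfer hook added by the conversion */
};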


@ -76,7 +76,8 @@ enum {
PDC_RESET = (1 << 11), /* HDMA reset */
PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI,
ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
ATA_FLAG_PIO_POLLING,
};
@ -120,6 +121,7 @@ static struct scsi_host_template pdc_ata_sht = {
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
};
@ -136,6 +138,7 @@ static const struct ata_port_operations pdc_sata_ops = {
.qc_prep = pdc_qc_prep,
.qc_issue = pdc_qc_issue_prot,
.eng_timeout = pdc_eng_timeout,
.data_xfer = ata_mmio_data_xfer,
.irq_handler = pdc_interrupt,
.irq_clear = pdc_irq_clear,
@ -158,6 +161,7 @@ static const struct ata_port_operations pdc_pata_ops = {
.qc_prep = pdc_qc_prep,
.qc_issue = pdc_qc_issue_prot,
.data_xfer = ata_mmio_data_xfer,
.eng_timeout = pdc_eng_timeout,
.irq_handler = pdc_interrupt,
.irq_clear = pdc_irq_clear,
@ -363,12 +367,23 @@ static void pdc_sata_phy_reset(struct ata_port *ap)
sata_phy_reset(ap);
}
static void pdc_pata_cbl_detect(struct ata_port *ap)
{
u8 tmp;
void __iomem *mmio = (void *) ap->ioaddr.cmd_addr + PDC_CTLSTAT + 0x03;
tmp = readb(mmio);
if (tmp & 0x01) {
ap->cbl = ATA_CBL_PATA40;
ap->udma_mask &= ATA_UDMA_MASK_40C;
} else
ap->cbl = ATA_CBL_PATA80;
}
static void pdc_pata_phy_reset(struct ata_port *ap)
{
/* FIXME: add cable detect. Don't assume 40-pin cable */
ap->cbl = ATA_CBL_PATA40;
ap->udma_mask &= ATA_UDMA_MASK_40C;
pdc_pata_cbl_detect(ap);
pdc_reset_port(ap);
ata_port_probe(ap);
ata_bus_reset(ap);
@ -435,7 +450,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
case ATA_PROT_NODATA:
printk(KERN_ERR "ata%u: command timeout\n", ap->id);
ata_port_printk(ap, KERN_ERR, "command timeout\n");
drv_stat = ata_wait_idle(ap);
qc->err_mask |= __ac_err_mask(drv_stat);
break;
@ -443,8 +458,9 @@ static void pdc_eng_timeout(struct ata_port *ap)
default:
drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n",
ap->id, qc->tf.command, drv_stat);
ata_port_printk(ap, KERN_ERR,
"unknown timeout, cmd 0x%x stat 0x%x\n",
qc->tf.command, drv_stat);
qc->err_mask |= ac_err_mask(drv_stat);
break;
@ -533,11 +549,11 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
ap = host_set->ports[i];
tmp = mask & (1 << (i + 1));
if (tmp && ap &&
!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
!(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
if (qc && (!(qc->tf.ctl & ATA_NIEN)))
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
handled += pdc_host_intr(ap, qc);
}
}
@ -676,10 +692,6 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
/*
* If this driver happens to only be useful on Apple's K2, then
* we should check that here as it has a normal Serverworks ID
*/
rc = pci_enable_device(pdev);
if (rc)
return rc;


@ -41,7 +41,7 @@
#include <linux/libata.h>
#define DRV_NAME "sata_qstor"
#define DRV_VERSION "0.05"
#define DRV_VERSION "0.06"
enum {
QS_PORTS = 4,
@ -142,6 +142,7 @@ static struct scsi_host_template qs_ata_sht = {
.proc_name = DRV_NAME,
.dma_boundary = QS_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
};
@ -156,6 +157,7 @@ static const struct ata_port_operations qs_ata_ops = {
.phy_reset = qs_phy_reset,
.qc_prep = qs_qc_prep,
.qc_issue = qs_qc_issue,
.data_xfer = ata_mmio_data_xfer,
.eng_timeout = qs_eng_timeout,
.irq_handler = qs_intr,
.irq_clear = qs_irq_clear,
@ -175,7 +177,7 @@ static const struct ata_port_info qs_port_info[] = {
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_SATA_RESET |
//FIXME ATA_FLAG_SRST |
ATA_FLAG_MMIO,
ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
.pio_mask = 0x10, /* pio4 */
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &qs_ata_ops,
@ -394,14 +396,13 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
sff1, sff0, port_no, sHST, sDST);
handled = 1;
if (ap && !(ap->flags &
(ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
struct qs_port_priv *pp = ap->private_data;
if (!pp || pp->state != qs_state_pkt)
continue;
qc = ata_qc_from_tag(ap, ap->active_tag);
if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
switch (sHST) {
case 0: /* successful CPB */
case 3: /* device error */
@ -428,13 +429,13 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
struct ata_port *ap;
ap = host_set->ports[port_no];
if (ap &&
!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
!(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
struct qs_port_priv *pp = ap->private_data;
if (!pp || pp->state != qs_state_mmio)
continue;
qc = ata_qc_from_tag(ap, ap->active_tag);
if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
/* check main status, clearing INTRQ */
u8 status = ata_check_status(ap);
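The nv, pdc and qstor interrupt handlers above all receive the same two-part guard change: ports flagged ATA_FLAG_DISABLED are skipped, and the test for an interrupt-driven command moves from the old taskfile control bit (qc->tf.ctl & ATA_NIEN) to the new ATA_TFLAG_POLLING flag. A condensed composite of that loop is sketched below; my_intr is an illustrative name and the body omits the hardware-specific status reads each driver adds around it.

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/libata.h>

/* Sketch only: the shared per-port dispatch pattern used by the converted
 * interrupt handlers in this merge (2.6.17-era handler signature). */
static irqreturn_t my_intr(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int i, handled = 0;

	spin_lock(&host_set->lock);

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap = host_set->ports[i];
		struct ata_queued_cmd *qc;

		/* skip ports libata has taken offline */
		if (!ap || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		/* only dispatch when the active command expects an IRQ */
		qc = ata_qc_from_tag(ap, ap->active_tag);
		if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
			handled += ata_host_intr(ap, qc);
	}

	spin_unlock(&host_set->lock);

	return IRQ_RETVAL(handled);
}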


@ -46,7 +46,7 @@
#include <linux/libata.h>
#define DRV_NAME "sata_sil"
#define DRV_VERSION "0.9"
#define DRV_VERSION "1.0"
enum {
/*
@ -54,8 +54,9 @@ enum {
*/
SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
SIL_FLAG_MOD15WRITE = (1 << 30),
SIL_DFL_HOST_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO,
ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME,
/*
* Controller IDs
@ -84,6 +85,20 @@ enum {
/* BMDMA/BMDMA2 */
SIL_INTR_STEERING = (1 << 1),
SIL_DMA_ENABLE = (1 << 0), /* DMA run switch */
SIL_DMA_RDWR = (1 << 3), /* DMA Rd-Wr */
SIL_DMA_SATA_IRQ = (1 << 4), /* OR of all SATA IRQs */
SIL_DMA_ACTIVE = (1 << 16), /* DMA running */
SIL_DMA_ERROR = (1 << 17), /* PCI bus error */
SIL_DMA_COMPLETE = (1 << 18), /* cmd complete / IRQ pending */
SIL_DMA_N_SATA_IRQ = (1 << 6), /* SATA_IRQ for the next channel */
SIL_DMA_N_ACTIVE = (1 << 24), /* ACTIVE for the next channel */
SIL_DMA_N_ERROR = (1 << 25), /* ERROR for the next channel */
SIL_DMA_N_COMPLETE = (1 << 26), /* COMPLETE for the next channel */
/* SIEN */
SIL_SIEN_N = (1 << 16), /* triggered by SError.N */
/*
* Others
*/
@ -96,6 +111,10 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static void sil_post_set_mode (struct ata_port *ap);
static irqreturn_t sil_interrupt(int irq, void *dev_instance,
struct pt_regs *regs);
static void sil_freeze(struct ata_port *ap);
static void sil_thaw(struct ata_port *ap);
static const struct pci_device_id sil_pci_tbl[] = {
@ -155,6 +174,7 @@ static struct scsi_host_template sil_sht = {
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
};
@ -166,7 +186,6 @@ static const struct ata_port_operations sil_ops = {
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.probe_reset = ata_std_probe_reset,
.post_set_mode = sil_post_set_mode,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
@ -174,8 +193,12 @@ static const struct ata_port_operations sil_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.irq_handler = ata_interrupt,
.data_xfer = ata_mmio_data_xfer,
.freeze = sil_freeze,
.thaw = sil_thaw,
.error_handler = ata_bmdma_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.irq_handler = sil_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.scr_read = sil_scr_read,
.scr_write = sil_scr_write,
@ -220,6 +243,7 @@ static const struct {
unsigned long tf; /* ATA taskfile register block */
unsigned long ctl; /* ATA control/altstatus register block */
unsigned long bmdma; /* DMA register block */
unsigned long bmdma2; /* DMA register block #2 */
unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */
unsigned long scr; /* SATA control register block */
unsigned long sien; /* SATA Interrupt Enable register */
@ -227,10 +251,10 @@ static const struct {
unsigned long sfis_cfg; /* SATA FIS reception config register */
} sil_port[] = {
/* port 0 ... */
{ 0x80, 0x8A, 0x00, 0x40, 0x100, 0x148, 0xb4, 0x14c },
{ 0xC0, 0xCA, 0x08, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
{ 0x280, 0x28A, 0x200, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
{ 0x2C0, 0x2CA, 0x208, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
{ 0x80, 0x8A, 0x00, 0x10, 0x40, 0x100, 0x148, 0xb4, 0x14c },
{ 0xC0, 0xCA, 0x08, 0x18, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
{ 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
{ 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
/* ... port 3 */
};
@ -263,7 +287,7 @@ static void sil_post_set_mode (struct ata_port *ap)
for (i = 0; i < 2; i++) {
dev = &ap->device[i];
if (!ata_dev_present(dev))
if (!ata_dev_enabled(dev))
dev_mode[i] = 0; /* PIO0/1/2 */
else if (dev->flags & ATA_DFLAG_PIO)
dev_mode[i] = 1; /* PIO3/4 */
@ -314,6 +338,151 @@ static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
writel(val, mmio);
}
static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
{
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
u8 status;
if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
u32 serror;
/* SIEN doesn't mask SATA IRQs on some 3112s. Those
* controllers continue to assert IRQ as long as
* SError bits are pending. Clear SError immediately.
*/
serror = sil_scr_read(ap, SCR_ERROR);
sil_scr_write(ap, SCR_ERROR, serror);
/* Trigger hotplug and accumulate SError only if the
* port isn't already frozen. Otherwise, PHY events
* during hardreset makes controllers with broken SIEN
* repeat probing needlessly.
*/
if (!(ap->flags & ATA_FLAG_FROZEN)) {
ata_ehi_hotplugged(&ap->eh_info);
ap->eh_info.serror |= serror;
}
goto freeze;
}
if (unlikely(!qc || qc->tf.ctl & ATA_NIEN))
goto freeze;
/* Check whether we are expecting interrupt in this state */
switch (ap->hsm_task_state) {
case HSM_ST_FIRST:
/* Some pre-ATAPI-4 devices assert INTRQ
* at this state when ready to receive CDB.
*/
/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
* The flag was turned on only for atapi devices.
* No need to check is_atapi_taskfile(&qc->tf) again.
*/
if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
goto err_hsm;
break;
case HSM_ST_LAST:
if (qc->tf.protocol == ATA_PROT_DMA ||
qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
/* clear DMA-Start bit */
ap->ops->bmdma_stop(qc);
if (bmdma2 & SIL_DMA_ERROR) {
qc->err_mask |= AC_ERR_HOST_BUS;
ap->hsm_task_state = HSM_ST_ERR;
}
}
break;
case HSM_ST:
break;
default:
goto err_hsm;
}
/* check main status, clearing INTRQ */
status = ata_chk_status(ap);
if (unlikely(status & ATA_BUSY))
goto err_hsm;
/* ack bmdma irq events */
ata_bmdma_irq_clear(ap);
/* kick HSM in the ass */
ata_hsm_move(ap, qc, status, 0);
return;
err_hsm:
qc->err_mask |= AC_ERR_HSM;
freeze:
ata_port_freeze(ap);
}
static irqreturn_t sil_interrupt(int irq, void *dev_instance,
struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
void __iomem *mmio_base = host_set->mmio_base;
int handled = 0;
int i;
spin_lock(&host_set->lock);
for (i = 0; i < host_set->n_ports; i++) {
struct ata_port *ap = host_set->ports[i];
u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
continue;
if (bmdma2 == 0xffffffff ||
!(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
continue;
sil_host_intr(ap, bmdma2);
handled = 1;
}
spin_unlock(&host_set->lock);
return IRQ_RETVAL(handled);
}
static void sil_freeze(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host_set->mmio_base;
u32 tmp;
/* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
writel(0, mmio_base + sil_port[ap->port_no].sien);
/* plug IRQ */
tmp = readl(mmio_base + SIL_SYSCFG);
tmp |= SIL_MASK_IDE0_INT << ap->port_no;
writel(tmp, mmio_base + SIL_SYSCFG);
readl(mmio_base + SIL_SYSCFG); /* flush */
}
static void sil_thaw(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host_set->mmio_base;
u32 tmp;
/* clear IRQ */
ata_chk_status(ap);
ata_bmdma_irq_clear(ap);
/* turn on SATA IRQ */
writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
/* turn on IRQ */
tmp = readl(mmio_base + SIL_SYSCFG);
tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
writel(tmp, mmio_base + SIL_SYSCFG);
}
/**
* sil_dev_config - Apply device/host-specific errata fixups
* @ap: Port containing device to be examined
@ -360,16 +529,16 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
if (slow_down ||
((ap->flags & SIL_FLAG_MOD15WRITE) &&
(quirks & SIL_QUIRK_MOD15WRITE))) {
printk(KERN_INFO "ata%u(%u): applying Seagate errata fix (mod15write workaround)\n",
ap->id, dev->devno);
ata_dev_printk(dev, KERN_INFO, "applying Seagate errata fix "
"(mod15write workaround)\n");
dev->max_sectors = 15;
return;
}
/* limit to udma5 */
if (quirks & SIL_QUIRK_UDMA5MAX) {
printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n",
ap->id, dev->devno, model_num);
ata_dev_printk(dev, KERN_INFO,
"applying Maxtor errata fix %s\n", model_num);
dev->udma_mask &= ATA_UDMA5;
return;
}
@ -384,16 +553,12 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
int rc;
unsigned int i;
int pci_dev_busy = 0;
u32 tmp, irq_mask;
u32 tmp;
u8 cls;
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
/*
* If this driver happens to only be useful on Apple's K2, then
* we should check that here as it has a normal Serverworks ID
*/
rc = pci_enable_device(pdev);
if (rc)
return rc;
@ -478,31 +643,13 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
}
if (ent->driver_data == sil_3114) {
irq_mask = SIL_MASK_4PORT;
/* flip the magic "make 4 ports work" bit */
tmp = readl(mmio_base + sil_port[2].bmdma);
if ((tmp & SIL_INTR_STEERING) == 0)
writel(tmp | SIL_INTR_STEERING,
mmio_base + sil_port[2].bmdma);
} else {
irq_mask = SIL_MASK_2PORT;
}
/* make sure IDE0/1/2/3 interrupts are not masked */
tmp = readl(mmio_base + SIL_SYSCFG);
if (tmp & irq_mask) {
tmp &= ~irq_mask;
writel(tmp, mmio_base + SIL_SYSCFG);
readl(mmio_base + SIL_SYSCFG); /* flush */
}
/* mask all SATA phy-related interrupts */
/* TODO: unmask bit 6 (SError N bit) for hotplug */
for (i = 0; i < probe_ent->n_ports; i++)
writel(0, mmio_base + sil_port[i].sien);
pci_set_master(pdev);
/* FIXME: check ata_device_add return value */


@ -31,15 +31,15 @@
#include <asm/io.h>
#define DRV_NAME "sata_sil24"
#define DRV_VERSION "0.23"
#define DRV_VERSION "0.24"
/*
* Port request block (PRB) 32 bytes
*/
struct sil24_prb {
u16 ctrl;
u16 prot;
u32 rx_cnt;
__le16 ctrl;
__le16 prot;
__le32 rx_cnt;
u8 fis[6 * 4];
};
@ -47,17 +47,17 @@ struct sil24_prb {
* Scatter gather entry (SGE) 16 bytes
*/
struct sil24_sge {
u64 addr;
u32 cnt;
u32 flags;
__le64 addr;
__le32 cnt;
__le32 flags;
};
/*
* Port multiplier
*/
struct sil24_port_multiplier {
u32 diag;
u32 sactive;
__le32 diag;
__le32 sactive;
};
enum {
@ -86,12 +86,21 @@ enum {
/* HOST_SLOT_STAT bits */
HOST_SSTAT_ATTN = (1 << 31),
/* HOST_CTRL bits */
HOST_CTRL_M66EN = (1 << 16), /* M66EN PCI bus signal */
HOST_CTRL_TRDY = (1 << 17), /* latched PCI TRDY */
HOST_CTRL_STOP = (1 << 18), /* latched PCI STOP */
HOST_CTRL_DEVSEL = (1 << 19), /* latched PCI DEVSEL */
HOST_CTRL_REQ64 = (1 << 20), /* latched PCI REQ64 */
/*
* Port registers
* (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
*/
PORT_REGS_SIZE = 0x2000,
PORT_PRB = 0x0000, /* (32 bytes PRB + 16 bytes SGEs * 6) * 31 (3968 bytes) */
PORT_LRAM = 0x0000, /* 31 LRAM slots and PM regs */
PORT_LRAM_SLOT_SZ = 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */
PORT_PM = 0x0f80, /* 8 bytes PM * 16 (128 bytes) */
/* 32 bit regs */
@ -142,8 +151,16 @@ enum {
PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */
PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */
PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */
PORT_IRQ_UNK_FIS = (1 << 6), /* Unknown FIS received */
PORT_IRQ_SDB_FIS = (1 << 11), /* SDB FIS received */
PORT_IRQ_UNK_FIS = (1 << 6), /* unknown FIS received */
PORT_IRQ_DEV_XCHG = (1 << 7), /* device exchanged */
PORT_IRQ_8B10B = (1 << 8), /* 8b/10b decode error threshold */
PORT_IRQ_CRC = (1 << 9), /* CRC error threshold */
PORT_IRQ_HANDSHAKE = (1 << 10), /* handshake error threshold */
PORT_IRQ_SDB_NOTIFY = (1 << 11), /* SDB notify received */
DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG |
PORT_IRQ_UNK_FIS,
/* bits[27:16] are unmasked (raw) */
PORT_IRQ_RAW_SHIFT = 16,
@ -174,7 +191,7 @@ enum {
PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */
PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */
PORT_CERR_XFR_MSGABRT = 34, /* PSD ecode 10 - master abort */
PORT_CERR_XFR_MSTABRT = 34, /* PSD ecode 10 - master abort */
PORT_CERR_XFR_PCIPERR = 35, /* PSD ecode 11 - PCI parity err during transfer */
PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */
@ -202,11 +219,19 @@ enum {
SGE_DRD = (1 << 29), /* discard data read (/dev/null)
data address ignored */
SIL24_MAX_CMDS = 31,
/* board id */
BID_SIL3124 = 0,
BID_SIL3132 = 1,
BID_SIL3131 = 2,
/* host flags */
SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
ATA_FLAG_NCQ | ATA_FLAG_SKIP_D2H_BSY,
SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
IRQ_STAT_4PORTS = 0xf,
};
@ -226,6 +251,58 @@ union sil24_cmd_block {
struct sil24_atapi_block atapi;
};
static struct sil24_cerr_info {
unsigned int err_mask, action;
const char *desc;
} sil24_cerr_db[] = {
[0] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
"device error" },
[PORT_CERR_DEV] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
"device error via D2H FIS" },
[PORT_CERR_SDB] = { AC_ERR_DEV, ATA_EH_REVALIDATE,
"device error via SDB FIS" },
[PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
"error in data FIS" },
[PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_SOFTRESET,
"failed to transmit command FIS" },
[PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
"protocol mismatch" },
[PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
"data directon mismatch" },
[PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
"ran out of SGEs while writing" },
[PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
"ran out of SGEs while reading" },
[PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
"invalid data directon for ATAPI CDB" },
[PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
"SGT no on qword boundary" },
[PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
"PCI target abort while fetching SGT" },
[PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
"PCI master abort while fetching SGT" },
[PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
"PCI parity error while fetching SGT" },
[PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_SOFTRESET,
"PRB not on qword boundary" },
[PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
"PCI target abort while fetching PRB" },
[PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
"PCI master abort while fetching PRB" },
[PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
"PCI parity error while fetching PRB" },
[PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
"undefined error while transferring data" },
[PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
"PCI target abort while transferring data" },
[PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
"PCI master abort while transferring data" },
[PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_SOFTRESET,
"PCI parity error while transferring data" },
[PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_SOFTRESET,
"FIS received while sending service FIS" },
};
/*
* ap->private_data
*
@ -249,12 +326,14 @@ static u8 sil24_check_status(struct ata_port *ap);
static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes);
static void sil24_qc_prep(struct ata_queued_cmd *qc);
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
static void sil24_irq_clear(struct ata_port *ap);
static void sil24_eng_timeout(struct ata_port *ap);
static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static void sil24_freeze(struct ata_port *ap);
static void sil24_thaw(struct ata_port *ap);
static void sil24_error_handler(struct ata_port *ap);
static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
static int sil24_port_start(struct ata_port *ap);
static void sil24_port_stop(struct ata_port *ap);
static void sil24_host_stop(struct ata_host_set *host_set);
@ -281,7 +360,8 @@ static struct scsi_host_template sil24_sht = {
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.change_queue_depth = ata_scsi_change_queue_depth,
.can_queue = SIL24_MAX_CMDS,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
@ -290,6 +370,7 @@ static struct scsi_host_template sil24_sht = {
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
};
@ -304,19 +385,20 @@ static const struct ata_port_operations sil24_ops = {
.tf_read = sil24_tf_read,
.probe_reset = sil24_probe_reset,
.qc_prep = sil24_qc_prep,
.qc_issue = sil24_qc_issue,
.eng_timeout = sil24_eng_timeout,
.irq_handler = sil24_interrupt,
.irq_clear = sil24_irq_clear,
.scr_read = sil24_scr_read,
.scr_write = sil24_scr_write,
.freeze = sil24_freeze,
.thaw = sil24_thaw,
.error_handler = sil24_error_handler,
.post_internal_cmd = sil24_post_internal_cmd,
.port_start = sil24_port_start,
.port_stop = sil24_port_stop,
.host_stop = sil24_host_stop,
@ -333,9 +415,8 @@ static struct ata_port_info sil24_port_info[] = {
/* sil_3124 */
{
.sht = &sil24_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
SIL24_NPORTS2FLAG(4),
.host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
SIL24_FLAG_PCIX_IRQ_WOC,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x3f, /* udma0-5 */
@ -344,9 +425,7 @@ static struct ata_port_info sil24_port_info[] = {
/* sil_3132 */
{
.sht = &sil24_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
SIL24_NPORTS2FLAG(2),
.host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x3f, /* udma0-5 */
@ -355,9 +434,7 @@ static struct ata_port_info sil24_port_info[] = {
/* sil_3131/sil_3531 */
{
.sht = &sil24_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
SIL24_NPORTS2FLAG(1),
.host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x3f, /* udma0-5 */
@ -365,6 +442,13 @@ static struct ata_port_info sil24_port_info[] = {
},
};
static int sil24_tag(int tag)
{
if (unlikely(ata_tag_internal(tag)))
return 0;
return tag;
}
static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
{
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
@ -426,56 +510,65 @@ static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
*tf = pp->tf;
}
static int sil24_softreset(struct ata_port *ap, int verbose,
unsigned int *class)
static int sil24_init_port(struct ata_port *ap)
{
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
u32 tmp;
writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
ata_wait_register(port + PORT_CTRL_STAT,
PORT_CS_INIT, PORT_CS_INIT, 10, 100);
tmp = ata_wait_register(port + PORT_CTRL_STAT,
PORT_CS_RDY, 0, 10, 100);
if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY)
return -EIO;
return 0;
}
static int sil24_softreset(struct ata_port *ap, unsigned int *class)
{
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
struct sil24_port_priv *pp = ap->private_data;
struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
dma_addr_t paddr = pp->cmd_block_dma;
unsigned long timeout = jiffies + ATA_TMOUT_BOOT * HZ;
u32 irq_enable, irq_stat;
u32 mask, irq_stat;
const char *reason;
DPRINTK("ENTER\n");
if (!sata_dev_present(ap)) {
if (ata_port_offline(ap)) {
DPRINTK("PHY reports no device\n");
*class = ATA_DEV_NONE;
goto out;
}
/* temporarily turn off IRQs during SRST */
irq_enable = readl(port + PORT_IRQ_ENABLE_SET);
writel(irq_enable, port + PORT_IRQ_ENABLE_CLR);
/*
* XXX: Not sure whether the following sleep is needed or not.
* The original driver had it. So....
*/
msleep(10);
/* put the port into known state */
if (sil24_init_port(ap)) {
reason ="port not ready";
goto err;
}
/* do SRST */
prb->ctrl = cpu_to_le16(PRB_CTRL_SRST);
prb->fis[1] = 0; /* no PM yet */
writel((u32)paddr, port + PORT_CMD_ACTIVATE);
writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
do {
irq_stat = readl(port + PORT_IRQ_STAT);
writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */
mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
irq_stat = ata_wait_register(port + PORT_IRQ_STAT, mask, 0x0,
100, ATA_TMOUT_BOOT / HZ * 1000);
irq_stat >>= PORT_IRQ_RAW_SHIFT;
if (irq_stat & (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR))
break;
msleep(100);
} while (time_before(jiffies, timeout));
/* restore IRQs */
writel(irq_enable, port + PORT_IRQ_ENABLE_SET);
writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */
irq_stat >>= PORT_IRQ_RAW_SHIFT;
if (!(irq_stat & PORT_IRQ_COMPLETE)) {
DPRINTK("EXIT, srst failed\n");
return -EIO;
if (irq_stat & PORT_IRQ_ERROR)
reason = "SRST command error";
else
reason = "timeout";
goto err;
}
sil24_update_tf(ap);
@ -487,22 +580,57 @@ static int sil24_softreset(struct ata_port *ap, int verbose,
out:
DPRINTK("EXIT, class=%u\n", *class);
return 0;
err:
ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
return -EIO;
}
static int sil24_hardreset(struct ata_port *ap, int verbose,
unsigned int *class)
static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
{
unsigned int dummy_class;
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
const char *reason;
int tout_msec, rc;
u32 tmp;
/* sil24 doesn't report device signature after hard reset */
return sata_std_hardreset(ap, verbose, &dummy_class);
}
/* sil24 does the right thing(tm) without any protection */
sata_set_spd(ap);
static int sil24_probe_reset(struct ata_port *ap, unsigned int *classes)
{
return ata_drive_probe_reset(ap, ata_std_probeinit,
sil24_softreset, sil24_hardreset,
ata_std_postreset, classes);
tout_msec = 100;
if (ata_port_online(ap))
tout_msec = 5000;
writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
tmp = ata_wait_register(port + PORT_CTRL_STAT,
PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10, tout_msec);
/* SStatus oscillates between zero and valid status after
* DEV_RST, debounce it.
*/
rc = sata_phy_debounce(ap, sata_deb_timing_before_fsrst);
if (rc) {
reason = "PHY debouncing failed";
goto err;
}
if (tmp & PORT_CS_DEV_RST) {
if (ata_port_offline(ap))
return 0;
reason = "link not ready";
goto err;
}
/* Sil24 doesn't store signature FIS after hardreset, so we
* can't wait for BSY to clear. Some devices take a long time
* to get ready and those devices will choke if we don't wait
* for BSY clearance here. Tell libata to perform follow-up
* softreset.
*/
return -EAGAIN;
err:
ata_port_printk(ap, KERN_ERR, "hardreset failed (%s)\n", reason);
return -EIO;
}
static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
@ -528,17 +656,20 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct sil24_port_priv *pp = ap->private_data;
union sil24_cmd_block *cb = pp->cmd_block + qc->tag;
union sil24_cmd_block *cb;
struct sil24_prb *prb;
struct sil24_sge *sge;
u16 ctrl = 0;
cb = &pp->cmd_block[sil24_tag(qc->tag)];
switch (qc->tf.protocol) {
case ATA_PROT_PIO:
case ATA_PROT_DMA:
case ATA_PROT_NCQ:
case ATA_PROT_NODATA:
prb = &cb->ata.prb;
sge = cb->ata.sge;
prb->ctrl = 0;
break;
case ATA_PROT_ATAPI:
@ -551,12 +682,10 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
if (qc->tf.flags & ATA_TFLAG_WRITE)
prb->ctrl = cpu_to_le16(PRB_CTRL_PACKET_WRITE);
ctrl = PRB_CTRL_PACKET_WRITE;
else
prb->ctrl = cpu_to_le16(PRB_CTRL_PACKET_READ);
} else
prb->ctrl = 0;
ctrl = PRB_CTRL_PACKET_READ;
}
break;
default:
@ -565,6 +694,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
BUG();
}
prb->ctrl = cpu_to_le16(ctrl);
ata_tf_to_fis(&qc->tf, prb->fis, 0);
if (qc->flags & ATA_QCFLAG_DMAMAP)
@ -574,11 +704,18 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
struct sil24_port_priv *pp = ap->private_data;
dma_addr_t paddr = pp->cmd_block_dma + qc->tag * sizeof(*pp->cmd_block);
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
unsigned int tag = sil24_tag(qc->tag);
dma_addr_t paddr;
void __iomem *activate;
paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
activate = port + PORT_CMD_ACTIVATE + tag * 8;
writel((u32)paddr, activate);
writel((u64)paddr >> 32, activate + 4);
writel((u32)paddr, port + PORT_CMD_ACTIVATE);
return 0;
}
@ -587,162 +724,139 @@ static void sil24_irq_clear(struct ata_port *ap)
/* unused */
}
static int __sil24_restart_controller(void __iomem *port)
static void sil24_freeze(struct ata_port *ap)
{
u32 tmp;
int cnt;
writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
/* Max ~10ms */
for (cnt = 0; cnt < 10000; cnt++) {
tmp = readl(port + PORT_CTRL_STAT);
if (tmp & PORT_CS_RDY)
return 0;
udelay(1);
}
return -1;
}
static void sil24_restart_controller(struct ata_port *ap)
{
if (__sil24_restart_controller((void __iomem *)ap->ioaddr.cmd_addr))
printk(KERN_ERR DRV_NAME
" ata%u: failed to restart controller\n", ap->id);
}
static int __sil24_reset_controller(void __iomem *port)
{
int cnt;
u32 tmp;
/* Reset controller state. Is this correct? */
writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
readl(port + PORT_CTRL_STAT); /* sync */
/* Max ~100ms */
for (cnt = 0; cnt < 1000; cnt++) {
udelay(100);
tmp = readl(port + PORT_CTRL_STAT);
if (!(tmp & PORT_CS_DEV_RST))
break;
}
if (tmp & PORT_CS_DEV_RST)
return -1;
if (tmp & PORT_CS_RDY)
return 0;
return __sil24_restart_controller(port);
}
static void sil24_reset_controller(struct ata_port *ap)
{
printk(KERN_NOTICE DRV_NAME
" ata%u: resetting controller...\n", ap->id);
if (__sil24_reset_controller((void __iomem *)ap->ioaddr.cmd_addr))
printk(KERN_ERR DRV_NAME
" ata%u: failed to reset controller\n", ap->id);
}
static void sil24_eng_timeout(struct ata_port *ap)
{
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
printk(KERN_ERR "ata%u: command timeout\n", ap->id);
qc->err_mask |= AC_ERR_TIMEOUT;
ata_eh_qc_complete(qc);
sil24_reset_controller(ap);
}
static void sil24_error_intr(struct ata_port *ap, u32 slot_stat)
{
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
struct sil24_port_priv *pp = ap->private_data;
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
u32 irq_stat, cmd_err, sstatus, serror;
unsigned int err_mask;
irq_stat = readl(port + PORT_IRQ_STAT);
writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */
if (!(irq_stat & PORT_IRQ_ERROR)) {
/* ignore non-completion, non-error irqs for now */
printk(KERN_WARNING DRV_NAME
"ata%u: non-error exception irq (irq_stat %x)\n",
ap->id, irq_stat);
return;
}
cmd_err = readl(port + PORT_CMD_ERR);
sstatus = readl(port + PORT_SSTATUS);
serror = readl(port + PORT_SERROR);
if (serror)
writel(serror, port + PORT_SERROR);
/*
* Don't log ATAPI device errors. They're supposed to happen
* and any serious errors will be logged using sense data by
* the SCSI layer.
/* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
* PORT_IRQ_ENABLE instead.
*/
if (ap->device[0].class != ATA_DEV_ATAPI || cmd_err > PORT_CERR_SDB)
printk("ata%u: error interrupt on port%d\n"
" stat=0x%x irq=0x%x cmd_err=%d sstatus=0x%x serror=0x%x\n",
ap->id, ap->port_no, slot_stat, irq_stat, cmd_err, sstatus, serror);
writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
}
if (cmd_err == PORT_CERR_DEV || cmd_err == PORT_CERR_SDB) {
/*
* Device is reporting error, tf registers are valid.
*/
sil24_update_tf(ap);
err_mask = ac_err_mask(pp->tf.command);
sil24_restart_controller(ap);
} else {
/*
* Other errors. libata currently doesn't have any
* mechanism to report these errors. Just turn on
* ATA_ERR.
*/
err_mask = AC_ERR_OTHER;
sil24_reset_controller(ap);
static void sil24_thaw(struct ata_port *ap)
{
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
u32 tmp;
/* clear IRQ */
tmp = readl(port + PORT_IRQ_STAT);
writel(tmp, port + PORT_IRQ_STAT);
/* turn IRQ back on */
writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
}
static void sil24_error_intr(struct ata_port *ap)
{
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
struct ata_eh_info *ehi = &ap->eh_info;
int freeze = 0;
u32 irq_stat;
/* on error, we need to clear IRQ explicitly */
irq_stat = readl(port + PORT_IRQ_STAT);
writel(irq_stat, port + PORT_IRQ_STAT);
/* first, analyze and record host port events */
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
ata_ehi_hotplugged(ehi);
ata_ehi_push_desc(ehi, ", %s",
irq_stat & PORT_IRQ_PHYRDY_CHG ?
"PHY RDY changed" : "device exchanged");
freeze = 1;
}
if (qc) {
qc->err_mask |= err_mask;
ata_qc_complete(qc);
if (irq_stat & PORT_IRQ_UNK_FIS) {
ehi->err_mask |= AC_ERR_HSM;
ehi->action |= ATA_EH_SOFTRESET;
ata_ehi_push_desc(ehi, ", unknown FIS");
freeze = 1;
}
/* deal with command error */
if (irq_stat & PORT_IRQ_ERROR) {
struct sil24_cerr_info *ci = NULL;
unsigned int err_mask = 0, action = 0;
struct ata_queued_cmd *qc;
u32 cerr;
/* analyze CMD_ERR */
cerr = readl(port + PORT_CMD_ERR);
if (cerr < ARRAY_SIZE(sil24_cerr_db))
ci = &sil24_cerr_db[cerr];
if (ci && ci->desc) {
err_mask |= ci->err_mask;
action |= ci->action;
ata_ehi_push_desc(ehi, ", %s", ci->desc);
} else {
err_mask |= AC_ERR_OTHER;
action |= ATA_EH_SOFTRESET;
ata_ehi_push_desc(ehi, ", unknown command error %d",
cerr);
}
/* record error info */
qc = ata_qc_from_tag(ap, ap->active_tag);
if (qc) {
sil24_update_tf(ap);
qc->err_mask |= err_mask;
} else
ehi->err_mask |= err_mask;
ehi->action |= action;
}
/* freeze or abort */
if (freeze)
ata_port_freeze(ap);
else
ata_port_abort(ap);
}
static void sil24_finish_qc(struct ata_queued_cmd *qc)
{
if (qc->flags & ATA_QCFLAG_RESULT_TF)
sil24_update_tf(qc->ap);
}
static inline void sil24_host_intr(struct ata_port *ap)
{
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
u32 slot_stat;
u32 slot_stat, qc_active;
int rc;
slot_stat = readl(port + PORT_SLOT_STAT);
if (!(slot_stat & HOST_SSTAT_ATTN)) {
struct sil24_port_priv *pp = ap->private_data;
/*
* !HOST_SSAT_ATTN guarantees successful completion,
* so reading back tf registers is unnecessary for
* most commands. TODO: read tf registers for
* commands which require these values on successful
* completion (EXECUTE DEVICE DIAGNOSTIC, CHECK POWER,
* DEVICE RESET and READ PORT MULTIPLIER (any more?).
*/
sil24_update_tf(ap);
if (qc) {
qc->err_mask |= ac_err_mask(pp->tf.command);
ata_qc_complete(qc);
}
} else
sil24_error_intr(ap, slot_stat);
if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
sil24_error_intr(ap);
return;
}
if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
qc_active = slot_stat & ~HOST_SSTAT_ATTN;
rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc);
if (rc > 0)
return;
if (rc < 0) {
struct ata_eh_info *ehi = &ap->eh_info;
ehi->err_mask |= AC_ERR_HSM;
ehi->action |= ATA_EH_SOFTRESET;
ata_port_freeze(ap);
return;
}
if (ata_ratelimit())
ata_port_printk(ap, KERN_INFO, "spurious interrupt "
"(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
slot_stat, ap->active_tag, ap->sactive);
}
static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
@ -769,7 +883,7 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *
for (i = 0; i < host_set->n_ports; i++)
if (status & (1 << i)) {
struct ata_port *ap = host_set->ports[i];
if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
sil24_host_intr(host_set->ports[i]);
handled++;
} else
@ -782,9 +896,35 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *
return IRQ_RETVAL(handled);
}
static void sil24_error_handler(struct ata_port *ap)
{
struct ata_eh_context *ehc = &ap->eh_context;
if (sil24_init_port(ap)) {
ata_eh_freeze_port(ap);
ehc->i.action |= ATA_EH_HARDRESET;
}
/* perform recovery */
ata_do_eh(ap, ata_std_prereset, sil24_softreset, sil24_hardreset,
ata_std_postreset);
}
static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
if (qc->flags & ATA_QCFLAG_FAILED)
qc->err_mask |= AC_ERR_OTHER;
/* make DMA engine forget about the failed command */
if (qc->err_mask)
sil24_init_port(ap);
}
static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev)
{
const size_t cb_size = sizeof(*pp->cmd_block);
const size_t cb_size = sizeof(*pp->cmd_block) * SIL24_MAX_CMDS;
dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
}
@ -794,7 +934,7 @@ static int sil24_port_start(struct ata_port *ap)
struct device *dev = ap->host_set->dev;
struct sil24_port_priv *pp;
union sil24_cmd_block *cb;
size_t cb_size = sizeof(*cb);
size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
dma_addr_t cb_dma;
int rc = -ENOMEM;
@ -858,6 +998,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *host_base = NULL;
void __iomem *port_base = NULL;
int i, rc;
u32 tmp;
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@ -910,37 +1051,53 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/*
* Configure the device
*/
/*
* FIXME: This device is certainly 64-bit capable. We just
* don't know how to use it. After fixing 32bit activation in
* this function, enable 64bit masks here.
*/
rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
"32-bit DMA enable failed\n");
goto out_free;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
"32-bit consistent DMA enable failed\n");
goto out_free;
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
"64-bit DMA enable failed\n");
goto out_free;
}
}
} else {
rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
"32-bit DMA enable failed\n");
goto out_free;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
"32-bit consistent DMA enable failed\n");
goto out_free;
}
}
/* GPIO off */
writel(0, host_base + HOST_FLASH_CMD);
/* Mask interrupts during initialization */
/* Apply workaround for completion IRQ loss on PCI-X errata */
if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) {
tmp = readl(host_base + HOST_CTRL);
if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
dev_printk(KERN_INFO, &pdev->dev,
"Applying completion IRQ loss on PCI-X "
"errata fix\n");
else
probe_ent->host_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
}
/* clear global reset & mask interrupts during initialization */
writel(0, host_base + HOST_CTRL);
for (i = 0; i < probe_ent->n_ports; i++) {
void __iomem *port = port_base + i * PORT_REGS_SIZE;
unsigned long portu = (unsigned long)port;
u32 tmp;
int cnt;
probe_ent->port[i].cmd_addr = portu + PORT_PRB;
probe_ent->port[i].cmd_addr = portu;
probe_ent->port[i].scr_addr = portu + PORT_SCONTROL;
ata_std_ports(&probe_ent->port[i]);
@ -952,18 +1109,20 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tmp = readl(port + PORT_CTRL_STAT);
if (tmp & PORT_CS_PORT_RST) {
writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
readl(port + PORT_CTRL_STAT); /* sync */
for (cnt = 0; cnt < 10; cnt++) {
msleep(10);
tmp = readl(port + PORT_CTRL_STAT);
if (!(tmp & PORT_CS_PORT_RST))
break;
}
tmp = ata_wait_register(port + PORT_CTRL_STAT,
PORT_CS_PORT_RST,
PORT_CS_PORT_RST, 10, 100);
if (tmp & PORT_CS_PORT_RST)
dev_printk(KERN_ERR, &pdev->dev,
"failed to clear port RST\n");
}
/* Configure IRQ WoC */
if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC)
writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
else
writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
/* Zero error counters. */
writel(0x8000, port + PORT_DECODE_ERR_THRESH);
writel(0x8000, port + PORT_CRC_ERR_THRESH);
@ -972,26 +1131,11 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
writel(0x0000, port + PORT_CRC_ERR_CNT);
writel(0x0000, port + PORT_HSHK_ERR_CNT);
/* FIXME: 32bit activation? */
writel(0, port + PORT_ACTIVATE_UPPER_ADDR);
writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_STAT);
/* Configure interrupts */
writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR | PORT_IRQ_SDB_FIS,
port + PORT_IRQ_ENABLE_SET);
/* Clear interrupts */
writel(0x0fff0fff, port + PORT_IRQ_STAT);
writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
/* Always use 64bit activation */
writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
/* Clear port multiplier enable and resume bits */
writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
/* Reset itself */
if (__sil24_reset_controller(port))
dev_printk(KERN_ERR, &pdev->dev,
"failed to reset controller\n");
}
/* Turn on interrupts */

View File

@ -43,7 +43,7 @@
#include <linux/libata.h>
#define DRV_NAME "sata_sis"
#define DRV_VERSION "0.5"
#define DRV_VERSION "0.6"
enum {
sis_180 = 0,
@ -96,6 +96,7 @@ static struct scsi_host_template sis_sht = {
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
};
@ -106,14 +107,17 @@ static const struct ata_port_operations sis_ops = {
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.phy_reset = sata_phy_reset,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.scr_read = sis_scr_read,
@ -125,8 +129,7 @@ static const struct ata_port_operations sis_ops = {
static struct ata_port_info sis_port_info = {
.sht = &sis_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_SATA_RESET |
ATA_FLAG_NO_LEGACY,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.pio_mask = 0x1f,
.mwdma_mask = 0x7,
.udma_mask = 0x7f,

View File

@ -54,7 +54,7 @@
#endif /* CONFIG_PPC_OF */
#define DRV_NAME "sata_svw"
#define DRV_VERSION "1.07"
#define DRV_VERSION "1.8"
enum {
/* Taskfile registers offsets */
@ -257,7 +257,7 @@ static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
int len, index;
/* Find the ata_port */
ap = (struct ata_port *) &shost->hostdata[0];
ap = ata_shost_to_port(shost);
if (ap == NULL)
return 0;
@ -299,6 +299,7 @@ static struct scsi_host_template k2_sata_sht = {
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
#ifdef CONFIG_PPC_OF
.proc_info = k2_sata_proc_info,
#endif
@ -313,14 +314,17 @@ static const struct ata_port_operations k2_sata_ops = {
.check_status = k2_stat_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.phy_reset = sata_phy_reset,
.bmdma_setup = k2_bmdma_setup_mmio,
.bmdma_start = k2_bmdma_start_mmio,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_mmio_data_xfer,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.scr_read = k2_sata_scr_read,
@ -420,8 +424,8 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
probe_ent->sht = &k2_sata_sht;
probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_SATA_RESET |
ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO;
probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO;
probe_ent->port_ops = &k2_sata_ops;
probe_ent->n_ports = 4;
probe_ent->irq = pdev->irq;

View File

@ -46,7 +46,7 @@
#include "sata_promise.h"
#define DRV_NAME "sata_sx4"
#define DRV_VERSION "0.8"
#define DRV_VERSION "0.9"
enum {
@ -191,6 +191,7 @@ static struct scsi_host_template pdc_sata_sht = {
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
};
@ -204,6 +205,7 @@ static const struct ata_port_operations pdc_20621_ops = {
.phy_reset = pdc_20621_phy_reset,
.qc_prep = pdc20621_qc_prep,
.qc_issue = pdc20621_qc_issue_prot,
.data_xfer = ata_mmio_data_xfer,
.eng_timeout = pdc_eng_timeout,
.irq_handler = pdc20621_interrupt,
.irq_clear = pdc20621_irq_clear,
@ -218,7 +220,7 @@ static const struct ata_port_info pdc_port_info[] = {
.sht = &pdc_sata_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_SRST | ATA_FLAG_MMIO |
ATA_FLAG_NO_ATAPI,
ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
@ -833,11 +835,11 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
tmp = mask & (1 << i);
VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
if (tmp && ap &&
!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
!(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
if (qc && (!(qc->tf.ctl & ATA_NIEN)))
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
handled += pdc20621_host_intr(ap, qc, (i > 4),
mmio_base);
}
@ -868,15 +870,16 @@ static void pdc_eng_timeout(struct ata_port *ap)
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
case ATA_PROT_NODATA:
printk(KERN_ERR "ata%u: command timeout\n", ap->id);
ata_port_printk(ap, KERN_ERR, "command timeout\n");
qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
break;
default:
drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n",
ap->id, qc->tf.command, drv_stat);
ata_port_printk(ap, KERN_ERR,
"unknown timeout, cmd 0x%x stat 0x%x\n",
qc->tf.command, drv_stat);
qc->err_mask |= ac_err_mask(drv_stat);
break;
@ -1375,10 +1378,6 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
/*
* If this driver happens to only be useful on Apple's K2, then
* we should check that here as it has a normal Serverworks ID
*/
rc = pci_enable_device(pdev);
if (rc)
return rc;

View File

@ -37,7 +37,7 @@
#include <linux/libata.h>
#define DRV_NAME "sata_uli"
#define DRV_VERSION "0.5"
#define DRV_VERSION "0.6"
enum {
uli_5289 = 0,
@ -90,6 +90,7 @@ static struct scsi_host_template uli_sht = {
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
};
@ -102,16 +103,18 @@ static const struct ata_port_operations uli_ops = {
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.phy_reset = sata_phy_reset,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.eng_timeout = ata_eng_timeout,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -126,8 +129,7 @@ static const struct ata_port_operations uli_ops = {
static struct ata_port_info uli_port_info = {
.sht = &uli_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_SATA_RESET |
ATA_FLAG_NO_LEGACY,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &uli_ops,

View File

@ -47,7 +47,7 @@
#include <asm/io.h>
#define DRV_NAME "sata_via"
#define DRV_VERSION "1.1"
#define DRV_VERSION "1.2"
enum board_ids_enum {
vt6420,
@ -103,6 +103,7 @@ static struct scsi_host_template svia_sht = {
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
};
@ -115,8 +116,6 @@ static const struct ata_port_operations svia_sata_ops = {
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.phy_reset = sata_phy_reset,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
@ -124,8 +123,12 @@ static const struct ata_port_operations svia_sata_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.eng_timeout = ata_eng_timeout,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -140,7 +143,7 @@ static const struct ata_port_operations svia_sata_ops = {
static struct ata_port_info svia_port_info = {
.sht = &svia_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_SRST | ATA_FLAG_NO_LEGACY,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x7f,
@ -235,8 +238,7 @@ static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
INIT_LIST_HEAD(&probe_ent->node);
probe_ent->sht = &svia_sht;
probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_SATA_RESET |
ATA_FLAG_NO_LEGACY;
probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
probe_ent->port_ops = &svia_sata_ops;
probe_ent->n_ports = N_PORTS;
probe_ent->irq = pdev->irq;

View File

@ -221,14 +221,21 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
ap = host_set->ports[i];
if (ap && !(ap->flags &
(ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
if (is_vsc_sata_int_err(i, int_status)) {
u32 err_status;
printk(KERN_DEBUG "%s: ignoring interrupt(s)\n", __FUNCTION__);
err_status = ap ? vsc_sata_scr_read(ap, SCR_ERROR) : 0;
vsc_sata_scr_write(ap, SCR_ERROR, err_status);
handled++;
}
if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
if (qc && (!(qc->tf.ctl & ATA_NIEN))) {
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
handled += ata_host_intr(ap, qc);
} else if (is_vsc_sata_int_err(i, int_status)) {
else if (is_vsc_sata_int_err(i, int_status)) {
/*
* On some chips (i.e. Intel 31244), an error
* interrupt will sneak in at initialization
@ -272,6 +279,7 @@ static struct scsi_host_template vsc_sata_sht = {
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
};
@ -283,14 +291,17 @@ static const struct ata_port_operations vsc_sata_ops = {
.exec_command = ata_exec_command,
.check_status = ata_check_status,
.dev_select = ata_std_dev_select,
.phy_reset = sata_phy_reset,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.irq_handler = vsc_sata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.scr_read = vsc_sata_scr_read,
@ -385,7 +396,7 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
probe_ent->sht = &vsc_sata_sht;
probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | ATA_FLAG_SATA_RESET;
ATA_FLAG_MMIO;
probe_ent->port_ops = &vsc_sata_ops;
probe_ent->n_ports = 4;
probe_ent->irq = pdev->irq;

View File

@ -578,6 +578,24 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
*/
static DEFINE_PER_CPU(struct list_head, scsi_done_q);
/**
* scsi_req_abort_cmd - Request command recovery for the specified command
* @cmd: pointer to the SCSI command of interest
*
* This function requests that SCSI Core start recovery for the
* command by deleting the timer and adding the command to the eh
* queue. It can be called by either LLDDs or SCSI Core. LLDDs who
* implement their own error recovery MAY ignore the timeout event if
* they generated scsi_req_abort_cmd.
*/
void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
{
if (!scsi_delete_timer(cmd))
return;
scsi_times_out(cmd);
}
EXPORT_SYMBOL(scsi_req_abort_cmd);
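As a hedged illustration of the intended call site (the my_drv_* name and the trigger condition are hypothetical, not part of this commit), an LLDD that detects a stuck command in its own watchdog path could hand it to SCSI EH like this:

#include <scsi/scsi_cmnd.h>

/* Hypothetical LLDD helper: the hardware has reported this command as
 * stuck, so delete its timer and push it onto the EH queue.  Because the
 * driver generated the abort request itself, it may ignore a later
 * timeout event for the same command, as the comment above notes. */
static void my_drv_request_recovery(struct scsi_cmnd *cmd)
{
	scsi_req_abort_cmd(cmd);
}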
/**
* scsi_done - Enqueue the finished SCSI command into the done queue.
* @cmd: The SCSI Command for which a low-level device driver (LLDD) gives

View File

@ -57,6 +57,28 @@ void scsi_eh_wakeup(struct Scsi_Host *shost)
}
}
/**
* scsi_schedule_eh - schedule EH for SCSI host
* @shost: SCSI host to invoke error handling on.
*
* Schedule SCSI EH without scmd.
**/
void scsi_schedule_eh(struct Scsi_Host *shost)
{
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
shost->host_eh_scheduled++;
scsi_eh_wakeup(shost);
}
spin_unlock_irqrestore(shost->host_lock, flags);
}
EXPORT_SYMBOL_GPL(scsi_schedule_eh);
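For illustration, a transport that keeps its own per-port EH state (libata-style) might kick the SCSI EH thread without a failed scmd roughly as follows; my_port_schedule_eh is a sketch, assuming the port's co-allocated Scsi_Host is reachable as ap->host:

#include <linux/libata.h>
#include <scsi/scsi_transport_api.h>

/* Hedged sketch: schedule host-level recovery for a port event (e.g. a
 * hotplug notification) that is not tied to any individual failed
 * command.  The ATA_FLAG_EH_PENDING marker is a transport-side detail
 * assumed here, not something scsi_schedule_eh() requires. */
static void my_port_schedule_eh(struct ata_port *ap)
{
	ap->flags |= ATA_FLAG_EH_PENDING;	/* transport-side marker */
	scsi_schedule_eh(ap->host);		/* wake scsi_error_handler() */
}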
/**
* scsi_eh_scmd_add - add scsi cmd to error handling.
* @scmd: scmd to run eh on.
@ -1515,7 +1537,7 @@ int scsi_error_handler(void *data)
*/
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
if (shost->host_failed == 0 ||
if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
shost->host_failed != shost->host_busy) {
SCSI_LOG_ERROR_RECOVERY(1,
printk("Error handler scsi_eh_%d sleeping\n",

View File

@ -500,7 +500,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
spin_lock_irqsave(shost->host_lock, flags);
shost->host_busy--;
if (unlikely(scsi_host_in_recovery(shost) &&
shost->host_failed))
(shost->host_failed || shost->host_eh_scheduled)))
scsi_eh_wakeup(shost);
spin_unlock(shost->host_lock);
spin_lock(sdev->request_queue->queue_lock);

View File

@ -0,0 +1,6 @@
#ifndef _SCSI_TRANSPORT_API_H
#define _SCSI_TRANSPORT_API_H
void scsi_schedule_eh(struct Scsi_Host *shost);
#endif /* _SCSI_TRANSPORT_API_H */

View File

@ -97,6 +97,9 @@ enum {
ATA_DRQ = (1 << 3), /* data request i/o */
ATA_ERR = (1 << 0), /* have an error */
ATA_SRST = (1 << 2), /* software reset */
ATA_ICRC = (1 << 7), /* interface CRC error */
ATA_UNC = (1 << 6), /* uncorrectable media error */
ATA_IDNF = (1 << 4), /* ID not found */
ATA_ABORTED = (1 << 2), /* command aborted */
/* ATA command block registers */
@ -130,6 +133,8 @@ enum {
ATA_CMD_WRITE = 0xCA,
ATA_CMD_WRITE_EXT = 0x35,
ATA_CMD_WRITE_FUA_EXT = 0x3D,
ATA_CMD_FPDMA_READ = 0x60,
ATA_CMD_FPDMA_WRITE = 0x61,
ATA_CMD_PIO_READ = 0x20,
ATA_CMD_PIO_READ_EXT = 0x24,
ATA_CMD_PIO_WRITE = 0x30,
@ -148,6 +153,10 @@ enum {
ATA_CMD_INIT_DEV_PARAMS = 0x91,
ATA_CMD_READ_NATIVE_MAX = 0xF8,
ATA_CMD_READ_NATIVE_MAX_EXT = 0x27,
ATA_CMD_READ_LOG_EXT = 0x2f,
/* READ_LOG_EXT pages */
ATA_LOG_SATA_NCQ = 0x10,
/* SETFEATURES stuff */
SETFEATURES_XFER = 0x03,
@ -172,6 +181,9 @@ enum {
XFER_PIO_0 = 0x08,
XFER_PIO_SLOW = 0x00,
SETFEATURES_WC_ON = 0x02, /* Enable write cache */
SETFEATURES_WC_OFF = 0x82, /* Disable write cache */
/* ATAPI stuff */
ATAPI_PKT_DMA = (1 << 0),
ATAPI_DMADIR = (1 << 2), /* ATAPI data dir:
@ -192,6 +204,16 @@ enum {
SCR_ACTIVE = 3,
SCR_NOTIFICATION = 4,
/* SError bits */
SERR_DATA_RECOVERED = (1 << 0), /* recovered data error */
SERR_COMM_RECOVERED = (1 << 1), /* recovered comm failure */
SERR_DATA = (1 << 8), /* unrecovered data error */
SERR_PERSISTENT = (1 << 9), /* persistent data/comm error */
SERR_PROTOCOL = (1 << 10), /* protocol violation */
SERR_INTERNAL = (1 << 11), /* host internal error */
SERR_PHYRDY_CHG = (1 << 16), /* PHY RDY changed */
SERR_DEV_XCHG = (1 << 26), /* device exchanged */
/* struct ata_taskfile flags */
ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */
ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */
@ -199,6 +221,7 @@ enum {
ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */
ATA_TFLAG_LBA = (1 << 4), /* enable LBA */
ATA_TFLAG_FUA = (1 << 5), /* enable FUA */
ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */
};
enum ata_tf_protocols {
@ -207,6 +230,7 @@ enum ata_tf_protocols {
ATA_PROT_NODATA, /* no data */
ATA_PROT_PIO, /* PIO single sector */
ATA_PROT_DMA, /* DMA */
ATA_PROT_NCQ, /* NCQ */
ATA_PROT_ATAPI, /* packet command, PIO data xfer*/
ATA_PROT_ATAPI_NODATA, /* packet command, no data */
ATA_PROT_ATAPI_DMA, /* packet command with special DMA sauce */
@ -262,6 +286,8 @@ struct ata_taskfile {
#define ata_id_has_pm(id) ((id)[82] & (1 << 3))
#define ata_id_has_lba(id) ((id)[49] & (1 << 9))
#define ata_id_has_dma(id) ((id)[49] & (1 << 8))
#define ata_id_has_ncq(id) ((id)[76] & (1 << 8))
#define ata_id_queue_depth(id) (((id)[75] & 0x1f) + 1)
#define ata_id_removeable(id) ((id)[0] & (1 << 7))
#define ata_id_has_dword_io(id) ((id)[50] & (1 << 0))
#define ata_id_u32(id,n) \
@ -272,6 +298,8 @@ struct ata_taskfile {
((u64) (id)[(n) + 1] << 16) | \
((u64) (id)[(n) + 0]) )
#define ata_id_cdb_intr(id) (((id)[0] & 0x60) == 0x20)
static inline unsigned int ata_id_major_version(const u16 *id)
{
unsigned int mver;
@ -311,6 +339,15 @@ static inline int is_atapi_taskfile(const struct ata_taskfile *tf)
(tf->protocol == ATA_PROT_ATAPI_DMA);
}
static inline int is_multi_taskfile(struct ata_taskfile *tf)
{
return (tf->command == ATA_CMD_READ_MULTI) ||
(tf->command == ATA_CMD_WRITE_MULTI) ||
(tf->command == ATA_CMD_READ_MULTI_EXT) ||
(tf->command == ATA_CMD_WRITE_MULTI_EXT) ||
(tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT);
}
static inline int ata_ok(u8 status)
{
return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))

View File

@ -33,6 +33,7 @@
#include <asm/io.h>
#include <linux/ata.h>
#include <linux/workqueue.h>
#include <scsi/scsi_host.h>
/*
* compile-time options: to be removed as soon as all the drivers are
@ -44,7 +45,6 @@
#undef ATA_NDEBUG /* define to disable quick runtime checks */
#undef ATA_ENABLE_PATA /* define to enable PATA support in some
* low-level drivers */
#undef ATAPI_ENABLE_DMADIR /* enables ATAPI DMADIR bridge support */
/* note: prints function name for you */
@ -108,8 +108,11 @@ enum {
LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
ATA_MAX_PORTS = 8,
ATA_DEF_QUEUE = 1,
ATA_MAX_QUEUE = 1,
/* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
ATA_MAX_QUEUE = 32,
ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1,
ATA_MAX_SECTORS = 200, /* FIXME */
ATA_MAX_SECTORS_LBA48 = 65535,
ATA_MAX_BUS = 2,
ATA_DEF_BUSY_WAIT = 10000,
ATA_SHORT_PAUSE = (HZ >> 6) + 1,
@ -120,9 +123,17 @@ enum {
ATA_SHT_USE_CLUSTERING = 1,
/* struct ata_device stuff */
ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */
ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */
ATA_DFLAG_LBA = (1 << 2), /* device supports LBA */
ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */
ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */
ATA_DFLAG_CDB_INTR = (1 << 2), /* device asserts INTRQ when ready for CDB */
ATA_DFLAG_NCQ = (1 << 3), /* device supports NCQ */
ATA_DFLAG_CFG_MASK = (1 << 8) - 1,
ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */
ATA_DFLAG_INIT_MASK = (1 << 16) - 1,
ATA_DFLAG_DETACH = (1 << 16),
ATA_DFLAG_DETACHED = (1 << 17),
ATA_DEV_UNKNOWN = 0, /* unknown device */
ATA_DEV_ATA = 1, /* ATA device */
@ -132,43 +143,57 @@ enum {
ATA_DEV_NONE = 5, /* no device */
/* struct ata_port flags */
ATA_FLAG_SLAVE_POSS = (1 << 1), /* host supports slave dev */
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
/* (doesn't imply presence) */
ATA_FLAG_PORT_DISABLED = (1 << 2), /* port is disabled, ignore it */
ATA_FLAG_SATA = (1 << 3),
ATA_FLAG_NO_LEGACY = (1 << 4), /* no legacy mode check */
ATA_FLAG_SRST = (1 << 5), /* (obsolete) use ATA SRST, not E.D.D. */
ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */
ATA_FLAG_SATA_RESET = (1 << 7), /* (obsolete) use COMRESET */
ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */
ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once
* proper HSM is in place. */
ATA_FLAG_DEBUGMSG = (1 << 10),
ATA_FLAG_NO_ATAPI = (1 << 11), /* No ATAPI support */
ATA_FLAG_SATA = (1 << 1),
ATA_FLAG_NO_LEGACY = (1 << 2), /* no legacy mode check */
ATA_FLAG_MMIO = (1 << 3), /* use MMIO, not PIO */
ATA_FLAG_SRST = (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */
ATA_FLAG_SATA_RESET = (1 << 5), /* (obsolete) use COMRESET */
ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */
ATA_FLAG_PIO_POLLING = (1 << 9), /* use polling PIO if LLD
* doesn't handle PIO interrupts */
ATA_FLAG_NCQ = (1 << 10), /* host supports NCQ */
ATA_FLAG_HRST_TO_RESUME = (1 << 11), /* hardreset to resume phy */
ATA_FLAG_SKIP_D2H_BSY = (1 << 12), /* can't wait for the first D2H
* Register FIS clearing BSY */
ATA_FLAG_SUSPENDED = (1 << 12), /* port is suspended */
ATA_FLAG_DEBUGMSG = (1 << 13),
ATA_FLAG_FLUSH_PORT_TASK = (1 << 14), /* flush port task */
ATA_FLAG_PIO_LBA48 = (1 << 13), /* Host DMA engine is LBA28 only */
ATA_FLAG_IRQ_MASK = (1 << 14), /* Mask IRQ in PIO xfers */
ATA_FLAG_EH_PENDING = (1 << 15), /* EH pending */
ATA_FLAG_EH_IN_PROGRESS = (1 << 16), /* EH in progress */
ATA_FLAG_FROZEN = (1 << 17), /* port is frozen */
ATA_FLAG_RECOVERED = (1 << 18), /* recovery action performed */
ATA_FLAG_LOADING = (1 << 19), /* boot/loading probe */
ATA_FLAG_UNLOADING = (1 << 20), /* module is unloading */
ATA_FLAG_SCSI_HOTPLUG = (1 << 21), /* SCSI hotplug scheduled */
ATA_FLAG_FLUSH_PORT_TASK = (1 << 15), /* Flush port task */
ATA_FLAG_IN_EH = (1 << 16), /* EH in progress */
ATA_FLAG_DISABLED = (1 << 22), /* port is disabled, ignore it */
ATA_FLAG_SUSPENDED = (1 << 23), /* port is suspended (power) */
ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi layer */
ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */
ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */
/* bits 24:31 of ap->flags are reserved for LLDD specific flags */
/* struct ata_queued_cmd flags */
ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi layer */
ATA_QCFLAG_SG = (1 << 1), /* have s/g table? */
ATA_QCFLAG_SINGLE = (1 << 2), /* no s/g, just a single buffer */
ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
ATA_QCFLAG_EH_SCHEDULED = (1 << 5), /* EH scheduled */
ATA_QCFLAG_IO = (1 << 3), /* standard IO command */
ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */
ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */
ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */
ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
/* host set flags */
ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */
/* various lengths of time */
ATA_TMOUT_PIO = 30 * HZ,
ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */
ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */
ATA_TMOUT_CDB = 30 * HZ,
ATA_TMOUT_CDB_QUICK = 5 * HZ,
ATA_TMOUT_INTERNAL = 30 * HZ,
ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,
@ -207,21 +232,56 @@ enum {
/* size of buffer to pad xfers ending on unaligned boundaries */
ATA_DMA_PAD_SZ = 4,
ATA_DMA_PAD_BUF_SZ = ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,
/* Masks for port functions */
/* masks for port functions */
ATA_PORT_PRIMARY = (1 << 0),
ATA_PORT_SECONDARY = (1 << 1),
/* ering size */
ATA_ERING_SIZE = 32,
/* desc_len for ata_eh_info and context */
ATA_EH_DESC_LEN = 80,
/* reset / recovery action types */
ATA_EH_REVALIDATE = (1 << 0),
ATA_EH_SOFTRESET = (1 << 1),
ATA_EH_HARDRESET = (1 << 2),
ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE,
/* ata_eh_info->flags */
ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */
/* max repeat if error condition is still set after ->error_handler */
ATA_EH_MAX_REPEAT = 5,
/* how hard are we gonna try to probe/recover devices */
ATA_PROBE_MAX_TRIES = 3,
ATA_EH_RESET_TRIES = 3,
ATA_EH_DEV_TRIES = 3,
/* Drive spinup time (time from power-on to the first D2H FIS)
* in msecs - 8s currently. Failing to get ready in this time
* isn't critical. It will result in reset failure for
* controllers which can't wait for the first D2H FIS. libata
* will retry, so it just has to be long enough to spin up
* most devices.
*/
ATA_SPINUP_WAIT = 8000,
};
enum hsm_task_states {
HSM_ST_UNKNOWN,
HSM_ST_IDLE,
HSM_ST_POLL,
HSM_ST_TMOUT,
HSM_ST,
HSM_ST_LAST,
HSM_ST_LAST_POLL,
HSM_ST_ERR,
HSM_ST_UNKNOWN, /* state unknown */
HSM_ST_IDLE, /* no command ongoing */
HSM_ST, /* (waiting for the device to) transfer data */
HSM_ST_LAST, /* (waiting for the device to) complete command */
HSM_ST_ERR, /* error */
HSM_ST_FIRST, /* (waiting for the device to)
write CDB or first data block */
};
enum ata_completion_errors {
@ -244,9 +304,9 @@ struct ata_queued_cmd;
/* typedefs */
typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
typedef void (*ata_probeinit_fn_t)(struct ata_port *);
typedef int (*ata_reset_fn_t)(struct ata_port *, int, unsigned int *);
typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *);
typedef int (*ata_prereset_fn_t)(struct ata_port *ap);
typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes);
typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *classes);
struct ata_ioports {
unsigned long cmd_addr;
@ -297,7 +357,8 @@ struct ata_host_set {
unsigned long flags;
int simplex_claimed; /* Keep separate in case we
ever need to do this locked */
struct ata_port * ports[0];
struct ata_host_set *next; /* for legacy mode */
struct ata_port *ports[0];
};
struct ata_queued_cmd {
@ -336,7 +397,7 @@ struct ata_queued_cmd {
struct scatterlist *__sg;
unsigned int err_mask;
struct ata_taskfile result_tf;
ata_qc_cb_t complete_fn;
void *private_data;
@ -348,12 +409,26 @@ struct ata_host_stats {
unsigned long rw_reqbuf;
};
struct ata_ering_entry {
int is_io;
unsigned int err_mask;
u64 timestamp;
};
struct ata_ering {
int cursor;
struct ata_ering_entry ring[ATA_ERING_SIZE];
};
struct ata_device {
u64 n_sectors; /* size of device, if ATA */
unsigned long flags; /* ATA_DFLAG_xxx */
unsigned int class; /* ATA_DEV_xxx */
struct ata_port *ap;
unsigned int devno; /* 0 or 1 */
u16 *id; /* IDENTIFY xxx DEVICE data */
unsigned long flags; /* ATA_DFLAG_xxx */
struct scsi_device *sdev; /* attached SCSI device */
/* n_sectors is used as CLEAR_OFFSET, see the comment above ATA_DEVICE_CLEAR_OFFSET */
u64 n_sectors; /* size of device, if ATA */
unsigned int class; /* ATA_DEV_xxx */
u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
u8 pio_mode;
u8 dma_mode;
u8 xfer_mode;
@ -373,11 +448,42 @@ struct ata_device {
u16 cylinders; /* Number of cylinders */
u16 heads; /* Number of heads */
u16 sectors; /* Number of sectors per track */
/* error history */
struct ata_ering ering;
};
/* Offset into struct ata_device. Fields above it are maintained
* across device init. Fields below are zeroed.
*/
#define ATA_DEVICE_CLEAR_OFFSET offsetof(struct ata_device, n_sectors)
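As a sketch of how such a clear-offset is typically consumed (illustrative only; my_dev_clear is not a function added by this commit), everything from n_sectors onward is wiped while the fields placed above it (ap, devno, flags, sdev) survive a re-probe:

#include <linux/string.h>
#include <linux/libata.h>

/* Illustrative helper: zero only the per-probe portion of ata_device.
 * Fields laid out before n_sectors are kept; everything from n_sectors
 * onward is cleared before the device is identified again. */
static void my_dev_clear(struct ata_device *dev)
{
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
}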
struct ata_eh_info {
struct ata_device *dev; /* offending device */
u32 serror; /* SError from LLDD */
unsigned int err_mask; /* port-wide err_mask */
unsigned int action; /* ATA_EH_* action mask */
unsigned int dev_action[ATA_MAX_DEVICES]; /* dev EH action */
unsigned int flags; /* ATA_EHI_* flags */
unsigned long hotplug_timestamp;
unsigned int probe_mask;
char desc[ATA_EH_DESC_LEN];
int desc_len;
};
struct ata_eh_context {
struct ata_eh_info i;
int tries[ATA_MAX_DEVICES];
unsigned int classes[ATA_MAX_DEVICES];
unsigned int did_probe_mask;
};
struct ata_port {
struct Scsi_Host *host; /* our co-allocated scsi host */
const struct ata_port_operations *ops;
spinlock_t *lock;
unsigned long flags; /* ATA_FLAG_xxx */
unsigned int id; /* unique id req'd by scsi midlayer */
unsigned int port_no; /* unique port #; from zero */
@ -397,26 +503,40 @@ struct ata_port {
unsigned int mwdma_mask;
unsigned int udma_mask;
unsigned int cbl; /* cable type; ATA_CBL_xxx */
unsigned int hw_sata_spd_limit;
unsigned int sata_spd_limit; /* SATA PHY speed limit */
/* record runtime error info, protected by host_set lock */
struct ata_eh_info eh_info;
/* EH context owned by EH */
struct ata_eh_context eh_context;
struct ata_device device[ATA_MAX_DEVICES];
struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
unsigned long qactive;
unsigned long qc_allocated;
unsigned int qc_active;
unsigned int active_tag;
u32 sactive;
struct ata_host_stats stats;
struct ata_host_set *host_set;
struct device *dev;
struct work_struct port_task;
struct work_struct hotplug_task;
struct work_struct scsi_rescan_task;
unsigned int hsm_task_state;
unsigned long pio_task_timeout;
u32 msg_enable;
struct list_head eh_done_q;
wait_queue_head_t eh_wait_q;
void *private_data;
u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */
};
struct ata_port_operations {
@ -438,7 +558,6 @@ struct ata_port_operations {
void (*phy_reset) (struct ata_port *ap); /* obsolete */
void (*set_mode) (struct ata_port *ap);
int (*probe_reset) (struct ata_port *ap, unsigned int *classes);
void (*post_set_mode) (struct ata_port *ap);
@ -447,10 +566,20 @@ struct ata_port_operations {
void (*bmdma_setup) (struct ata_queued_cmd *qc);
void (*bmdma_start) (struct ata_queued_cmd *qc);
void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
void (*qc_prep) (struct ata_queued_cmd *qc);
unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
void (*eng_timeout) (struct ata_port *ap);
/* Error handlers. ->error_handler overrides ->eng_timeout and
* indicates that new-style EH is in place.
*/
void (*eng_timeout) (struct ata_port *ap); /* obsolete */
void (*freeze) (struct ata_port *ap);
void (*thaw) (struct ata_port *ap);
void (*error_handler) (struct ata_port *ap);
void (*post_internal_cmd) (struct ata_queued_cmd *qc);
irqreturn_t (*irq_handler)(int, void *, struct pt_regs *);
void (*irq_clear) (struct ata_port *);
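Given the comment above that ->error_handler overrides ->eng_timeout, a hedged sketch of how a converted BMDMA-style driver fills in the new hooks, mirroring the sis_ops and uli_ops changes earlier in this diff (my_ops and the omitted members are placeholders):

#include <linux/libata.h>

/* Hedged sketch: new-style EH wiring for a generic BMDMA driver.  The
 * stock helpers handle freeze/thaw and recovery; .eng_timeout is simply
 * left unset because .error_handler takes precedence. */
static const struct ata_port_operations my_ops = {
	/* ... taskfile, bmdma, qc_prep/qc_issue and irq hooks as before ... */
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
};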
@ -492,22 +621,22 @@ struct ata_timing {
#define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin)
extern const unsigned long sata_deb_timing_boot[];
extern const unsigned long sata_deb_timing_eh[];
extern const unsigned long sata_deb_timing_before_fsrst[];
extern void ata_port_probe(struct ata_port *);
extern void __sata_phy_reset(struct ata_port *ap);
extern void sata_phy_reset(struct ata_port *ap);
extern void ata_bus_reset(struct ata_port *ap);
extern int ata_drive_probe_reset(struct ata_port *ap,
ata_probeinit_fn_t probeinit,
ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
ata_postreset_fn_t postreset, unsigned int *classes);
extern void ata_std_probeinit(struct ata_port *ap);
extern int ata_std_softreset(struct ata_port *ap, int verbose,
unsigned int *classes);
extern int sata_std_hardreset(struct ata_port *ap, int verbose,
unsigned int *class);
extern int sata_set_spd(struct ata_port *ap);
extern int sata_phy_debounce(struct ata_port *ap, const unsigned long *param);
extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param);
extern int ata_std_prereset(struct ata_port *ap);
extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes);
extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class);
extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
extern int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
int post_reset);
extern int ata_dev_revalidate(struct ata_device *dev, int post_reset);
extern void ata_port_disable(struct ata_port *);
extern void ata_std_ports(struct ata_ioports *ioaddr);
#ifdef CONFIG_PCI
@ -519,24 +648,32 @@ extern int ata_pci_device_resume(struct pci_dev *pdev);
extern int ata_pci_clear_simplex(struct pci_dev *pdev);
#endif /* CONFIG_PCI */
extern int ata_device_add(const struct ata_probe_ent *ent);
extern void ata_port_detach(struct ata_port *ap);
extern void ata_host_set_remove(struct ata_host_set *host_set);
extern int ata_scsi_detect(struct scsi_host_template *sht);
extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
extern int ata_scsi_release(struct Scsi_Host *host);
extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
extern int sata_scr_valid(struct ata_port *ap);
extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val);
extern int sata_scr_write(struct ata_port *ap, int reg, u32 val);
extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
extern int ata_port_online(struct ata_port *ap);
extern int ata_port_offline(struct ata_port *ap);
extern int ata_scsi_device_resume(struct scsi_device *);
extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
extern int ata_device_resume(struct ata_port *, struct ata_device *);
extern int ata_device_suspend(struct ata_port *, struct ata_device *, pm_message_t state);
extern int ata_device_resume(struct ata_device *);
extern int ata_device_suspend(struct ata_device *, pm_message_t state);
extern int ata_ratelimit(void);
extern unsigned int ata_busy_sleep(struct ata_port *ap,
unsigned long timeout_pat,
unsigned long timeout);
extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
void *data, unsigned long delay);
extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
unsigned long interval_msec,
unsigned long timeout_msec);
/*
* Default driver ops implementations
@ -550,11 +687,16 @@ extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
extern u8 ata_check_status(struct ata_port *ap);
extern u8 ata_altstatus(struct ata_port *ap);
extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
extern int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes);
extern int ata_port_start (struct ata_port *ap);
extern void ata_port_stop (struct ata_port *ap);
extern void ata_host_stop (struct ata_host_set *host_set);
extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
unsigned int buflen, int write_data);
extern void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
unsigned int buflen, int write_data);
extern void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
unsigned int buflen, int write_data);
extern void ata_qc_prep(struct ata_queued_cmd *qc);
extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
@ -572,17 +714,29 @@ extern void ata_bmdma_start (struct ata_queued_cmd *qc);
extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
extern u8 ata_bmdma_status(struct ata_port *ap);
extern void ata_bmdma_irq_clear(struct ata_port *ap);
extern void __ata_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eng_timeout(struct ata_port *ap);
extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
struct scsi_cmnd *cmd,
extern void ata_bmdma_freeze(struct ata_port *ap);
extern void ata_bmdma_thaw(struct ata_port *ap);
extern void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
ata_reset_fn_t softreset,
ata_reset_fn_t hardreset,
ata_postreset_fn_t postreset);
extern void ata_bmdma_error_handler(struct ata_port *ap);
extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
extern int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status, int in_wq);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
void (*finish_qc)(struct ata_queued_cmd *));
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *));
extern int ata_std_bios_param(struct scsi_device *sdev,
struct block_device *bdev,
sector_t capacity, int geom[]);
extern int ata_scsi_slave_config(struct scsi_device *sdev);
extern struct ata_device *ata_dev_pair(struct ata_port *ap,
struct ata_device *adev);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
int queue_depth);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);
/*
* Timing helpers
@ -628,7 +782,64 @@ extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bit
extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long);
#endif /* CONFIG_PCI */
/*
* EH
*/
extern void ata_eng_timeout(struct ata_port *ap);
extern void ata_port_schedule_eh(struct ata_port *ap);
extern int ata_port_abort(struct ata_port *ap);
extern int ata_port_freeze(struct ata_port *ap);
extern void ata_eh_freeze_port(struct ata_port *ap);
extern void ata_eh_thaw_port(struct ata_port *ap);
extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
ata_postreset_fn_t postreset);
/*
* printk helpers
*/
#define ata_port_printk(ap, lv, fmt, args...) \
printk(lv"ata%u: "fmt, (ap)->id , ##args)
#define ata_dev_printk(dev, lv, fmt, args...) \
printk(lv"ata%u.%02u: "fmt, (dev)->ap->id, (dev)->devno , ##args)
/*
* ata_eh_info helpers
*/
#define ata_ehi_push_desc(ehi, fmt, args...) do { \
(ehi)->desc_len += scnprintf((ehi)->desc + (ehi)->desc_len, \
ATA_EH_DESC_LEN - (ehi)->desc_len, \
fmt , ##args); \
} while (0)
#define ata_ehi_clear_desc(ehi) do { \
(ehi)->desc[0] = '\0'; \
(ehi)->desc_len = 0; \
} while (0)
static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
{
if (ehi->flags & ATA_EHI_HOTPLUGGED)
return;
ehi->flags |= ATA_EHI_HOTPLUGGED;
ehi->hotplug_timestamp = jiffies;
ehi->err_mask |= AC_ERR_ATA_BUS;
ehi->action |= ATA_EH_SOFTRESET;
ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
}
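To show how the eh_info helpers above combine in practice, here is a hedged sketch of an interrupt path reacting to a PHY event (my_handle_phy_event is illustrative; the SError handling detail is an assumption, not part of this commit):

#include <linux/libata.h>

/* Hedged sketch: record a hotplug-class event seen in the IRQ handler
 * and freeze the port so EH takes over.  Assumes the host_set lock is
 * already held, as it is in a typical ->irq_handler. */
static void my_handle_phy_event(struct ata_port *ap, u32 serror)
{
	struct ata_eh_info *ehi = &ap->eh_info;

	ata_ehi_hotplugged(ehi);	/* mark the port for probe + softreset */
	ehi->serror |= serror;
	ata_ehi_push_desc(ehi, "PHY event, SError 0x%x", serror);

	ata_port_freeze(ap);		/* aborts active qcs and schedules EH */
}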
/*
* qc helpers
*/
static inline int
ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
{
@ -671,14 +882,39 @@ static inline unsigned int ata_tag_valid(unsigned int tag)
return (tag < ATA_MAX_QUEUE) ? 1 : 0;
}
static inline unsigned int ata_class_present(unsigned int class)
static inline unsigned int ata_tag_internal(unsigned int tag)
{
return tag == ATA_MAX_QUEUE - 1;
}
static inline unsigned int ata_class_enabled(unsigned int class)
{
return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
}
static inline unsigned int ata_dev_present(const struct ata_device *dev)
static inline unsigned int ata_class_disabled(unsigned int class)
{
return ata_class_present(dev->class);
return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP;
}
static inline unsigned int ata_class_absent(unsigned int class)
{
return !ata_class_enabled(class) && !ata_class_disabled(class);
}
static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
{
return ata_class_enabled(dev->class);
}
static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
{
return ata_class_disabled(dev->class);
}
static inline unsigned int ata_dev_absent(const struct ata_device *dev)
{
return ata_class_absent(dev->class);
}
static inline u8 ata_chk_status(struct ata_port *ap)
@ -759,20 +995,35 @@ static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
qc->tf.ctl |= ATA_NIEN;
}
static inline struct ata_queued_cmd *ata_qc_from_tag (struct ata_port *ap,
unsigned int tag)
static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
unsigned int tag)
{
if (likely(ata_tag_valid(tag)))
return &ap->qcmd[tag];
return NULL;
}
static inline void ata_tf_init(struct ata_port *ap, struct ata_taskfile *tf, unsigned int device)
static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
unsigned int tag)
{
struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
if (unlikely(!qc) || !ap->ops->error_handler)
return qc;
if ((qc->flags & (ATA_QCFLAG_ACTIVE |
ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
return qc;
return NULL;
}
static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
{
memset(tf, 0, sizeof(*tf));
tf->ctl = ap->ctl;
if (device == 0)
tf->ctl = dev->ap->ctl;
if (dev->devno == 0)
tf->device = ATA_DEVICE_OBS;
else
tf->device = ATA_DEVICE_OBS | ATA_DEV1;
@ -787,26 +1038,11 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
qc->nbytes = qc->curbytes = 0;
qc->err_mask = 0;
ata_tf_init(qc->ap, &qc->tf, qc->dev->devno);
}
ata_tf_init(qc->dev, &qc->tf);
/**
* ata_qc_complete - Complete an active ATA command
* @qc: Command to complete
* @err_mask: ATA Status register contents
*
* Indicate to the mid and upper layers that an ATA
* command has completed, with either an ok or not-ok status.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
static inline void ata_qc_complete(struct ata_queued_cmd *qc)
{
if (unlikely(qc->flags & ATA_QCFLAG_EH_SCHEDULED))
return;
__ata_qc_complete(qc);
/* init result_tf such that it indicates normal completion */
qc->result_tf.command = ATA_DRDY;
qc->result_tf.feature = 0;
}
/**
@ -885,28 +1121,6 @@ static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
return status;
}
static inline u32 scr_read(struct ata_port *ap, unsigned int reg)
{
return ap->ops->scr_read(ap, reg);
}
static inline void scr_write(struct ata_port *ap, unsigned int reg, u32 val)
{
ap->ops->scr_write(ap, reg, val);
}
static inline void scr_write_flush(struct ata_port *ap, unsigned int reg,
u32 val)
{
ap->ops->scr_write(ap, reg, val);
(void) ap->ops->scr_read(ap, reg);
}
static inline unsigned int sata_dev_present(struct ata_port *ap)
{
return ((scr_read(ap, SCR_STATUS) & 0xf) == 0x3) ? 1 : 0;
}
static inline int ata_try_flush_cache(const struct ata_device *dev)
{
return ata_id_wcache_enabled(dev->id) ||
@ -916,7 +1130,7 @@ static inline int ata_try_flush_cache(const struct ata_device *dev)
static inline unsigned int ac_err_mask(u8 status)
{
if (status & ATA_BUSY)
if (status & (ATA_BUSY | ATA_DRQ))
return AC_ERR_HSM;
if (status & (ATA_ERR | ATA_DF))
return AC_ERR_DEV;
@ -944,4 +1158,9 @@ static inline void ata_pad_free(struct ata_port *ap, struct device *dev)
dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
}
static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
{
return (struct ata_port *) &host->hostdata[0];
}
#endif /* __LINUX_LIBATA_H__ */

View File

@ -1196,8 +1196,12 @@
#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373
#define PCI_DEVICE_ID_NVIDIA_NVENET_16 0x03E5
#define PCI_DEVICE_ID_NVIDIA_NVENET_17 0x03E6
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA 0x03E7
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE 0x03EC
#define PCI_DEVICE_ID_NVIDIA_NVENET_18 0x03EE
#define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7
#define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450
#define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451
#define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452
@ -1255,6 +1259,7 @@
#define PCI_DEVICE_ID_VIA_PX8X0_0 0x0259
#define PCI_DEVICE_ID_VIA_3269_0 0x0269
#define PCI_DEVICE_ID_VIA_K8T800PRO_0 0x0282
#define PCI_DEVICE_ID_VIA_3296_0 0x0296
#define PCI_DEVICE_ID_VIA_8363_0 0x0305
#define PCI_DEVICE_ID_VIA_P4M800CE 0x0314
#define PCI_DEVICE_ID_VIA_8371_0 0x0391
@ -1262,6 +1267,7 @@
#define PCI_DEVICE_ID_VIA_82C561 0x0561
#define PCI_DEVICE_ID_VIA_82C586_1 0x0571
#define PCI_DEVICE_ID_VIA_82C576 0x0576
#define PCI_DEVICE_ID_VIA_SATA_EIDE 0x0581
#define PCI_DEVICE_ID_VIA_82C586_0 0x0586
#define PCI_DEVICE_ID_VIA_82C596 0x0596
#define PCI_DEVICE_ID_VIA_82C597_0 0x0597
@ -1302,10 +1308,11 @@
#define PCI_DEVICE_ID_VIA_8783_0 0x3208
#define PCI_DEVICE_ID_VIA_8237 0x3227
#define PCI_DEVICE_ID_VIA_8251 0x3287
#define PCI_DEVICE_ID_VIA_3296_0 0x0296
#define PCI_DEVICE_ID_VIA_8237A 0x3337
#define PCI_DEVICE_ID_VIA_8231 0x8231
#define PCI_DEVICE_ID_VIA_8231_4 0x8235
#define PCI_DEVICE_ID_VIA_8365_1 0x8305
#define PCI_DEVICE_ID_VIA_CX700 0x8324
#define PCI_DEVICE_ID_VIA_8371_1 0x8391
#define PCI_DEVICE_ID_VIA_82C598_1 0x8598
#define PCI_DEVICE_ID_VIA_838X_1 0xB188

View File

@ -145,6 +145,7 @@ extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
extern void scsi_put_command(struct scsi_cmnd *);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int);
extern void scsi_finish_command(struct scsi_cmnd *cmd);
extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd);
extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
size_t *offset, size_t *len);

View File

@ -472,6 +472,7 @@ struct Scsi_Host {
*/
unsigned int host_busy; /* commands actually active on low-level */
unsigned int host_failed; /* commands that failed. */
unsigned int host_eh_scheduled; /* EH scheduled without command */
unsigned short host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
int resetting; /* if set, it means that last_reset is a valid value */