Merge with /usr/src/ntfs-2.6.git.

Anton Altaparmakov 2005-05-27 15:36:21 +01:00
commit 5eac51462f
83 changed files with 17608 additions and 2616 deletions

View file

@ -1,7 +1,7 @@
/*
* mf.c
* Copyright (C) 2001 Troy D. Armstrong IBM Corporation
* Copyright (C) 2004 Stephen Rothwell IBM Corporation
* Copyright (C) 2004-2005 Stephen Rothwell IBM Corporation
*
* This modules exists as an interface between a Linux secondary partition
* running on an iSeries and the primary partition's Virtual Service
@ -36,10 +36,12 @@
#include <asm/time.h>
#include <asm/uaccess.h>
#include <asm/paca.h>
#include <asm/iSeries/vio.h>
#include <asm/iSeries/mf.h>
#include <asm/iSeries/HvLpConfig.h>
#include <asm/iSeries/ItSpCommArea.h>
#include <asm/iSeries/ItLpQueue.h>
/*
* This is the structure layout for the Machine Facilites LPAR event
@ -696,36 +698,23 @@ static void get_rtc_time_complete(void *token, struct ce_msg_data *ce_msg)
complete(&rtc->com);
}
int mf_get_rtc(struct rtc_time *tm)
static int rtc_set_tm(int rc, u8 *ce_msg, struct rtc_time *tm)
{
struct ce_msg_comp_data ce_complete;
struct rtc_time_data rtc_data;
int rc;
memset(&ce_complete, 0, sizeof(ce_complete));
memset(&rtc_data, 0, sizeof(rtc_data));
init_completion(&rtc_data.com);
ce_complete.handler = &get_rtc_time_complete;
ce_complete.token = &rtc_data;
rc = signal_ce_msg_simple(0x40, &ce_complete);
if (rc)
return rc;
wait_for_completion(&rtc_data.com);
tm->tm_wday = 0;
tm->tm_yday = 0;
tm->tm_isdst = 0;
if (rtc_data.rc) {
if (rc) {
tm->tm_sec = 0;
tm->tm_min = 0;
tm->tm_hour = 0;
tm->tm_mday = 15;
tm->tm_mon = 5;
tm->tm_year = 52;
return rtc_data.rc;
return rc;
}
if ((rtc_data.ce_msg.ce_msg[2] == 0xa9) ||
(rtc_data.ce_msg.ce_msg[2] == 0xaf)) {
if ((ce_msg[2] == 0xa9) ||
(ce_msg[2] == 0xaf)) {
/* TOD clock is not set */
tm->tm_sec = 1;
tm->tm_min = 1;
@ -736,7 +725,6 @@ int mf_get_rtc(struct rtc_time *tm)
mf_set_rtc(tm);
}
{
u8 *ce_msg = rtc_data.ce_msg.ce_msg;
u8 year = ce_msg[5];
u8 sec = ce_msg[6];
u8 min = ce_msg[7];
@ -765,6 +753,63 @@ int mf_get_rtc(struct rtc_time *tm)
return 0;
}
int mf_get_rtc(struct rtc_time *tm)
{
struct ce_msg_comp_data ce_complete;
struct rtc_time_data rtc_data;
int rc;
memset(&ce_complete, 0, sizeof(ce_complete));
memset(&rtc_data, 0, sizeof(rtc_data));
init_completion(&rtc_data.com);
ce_complete.handler = &get_rtc_time_complete;
ce_complete.token = &rtc_data;
rc = signal_ce_msg_simple(0x40, &ce_complete);
if (rc)
return rc;
wait_for_completion(&rtc_data.com);
return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
}
struct boot_rtc_time_data {
int busy;
struct ce_msg_data ce_msg;
int rc;
};
static void get_boot_rtc_time_complete(void *token, struct ce_msg_data *ce_msg)
{
struct boot_rtc_time_data *rtc = token;
memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg));
rtc->rc = 0;
rtc->busy = 0;
}
int mf_get_boot_rtc(struct rtc_time *tm)
{
struct ce_msg_comp_data ce_complete;
struct boot_rtc_time_data rtc_data;
int rc;
memset(&ce_complete, 0, sizeof(ce_complete));
memset(&rtc_data, 0, sizeof(rtc_data));
rtc_data.busy = 1;
ce_complete.handler = &get_boot_rtc_time_complete;
ce_complete.token = &rtc_data;
rc = signal_ce_msg_simple(0x40, &ce_complete);
if (rc)
return rc;
/* We need to poll here as we are not yet taking interrupts */
while (rtc_data.busy) {
extern unsigned long lpevent_count;
struct ItLpQueue *lpq = get_paca()->lpqueue_ptr;
if (lpq && ItLpQueue_isLpIntPending(lpq))
lpevent_count += ItLpQueue_process(lpq, NULL);
}
return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
}
int mf_set_rtc(struct rtc_time *tm)
{
char ce_time[12];
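Both RTC paths now feed the service-processor reply through the shared rtc_set_tm() decoder; the only difference is how they wait. A minimal caller sketch, purely illustrative and assuming the functions as declared above:

static void rtc_example(void)
{
	struct rtc_time tm;

	/* normal path: sleeps on a completion, so interrupts must be enabled */
	if (mf_get_rtc(&tm) == 0)
		/* tm is valid here */;

	/* early-boot path: busy-polls and drains the LP event queue itself,
	 * so it is usable before the interrupt machinery is up */
	if (mf_get_boot_rtc(&tm) == 0)
		/* tm is valid here */;
}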

View file

@ -292,47 +292,10 @@ int iSeries_set_rtc_time(struct rtc_time *tm)
void iSeries_get_boot_time(struct rtc_time *tm)
{
unsigned long time;
static unsigned long lastsec = 1;
u32 dataWord1 = *((u32 *)(&xSpCommArea.xBcdTimeAtIplStart));
u32 dataWord2 = *(((u32 *)&(xSpCommArea.xBcdTimeAtIplStart)) + 1);
int year = 1970;
int year1 = ( dataWord1 >> 24 ) & 0x000000FF;
int year2 = ( dataWord1 >> 16 ) & 0x000000FF;
int sec = ( dataWord1 >> 8 ) & 0x000000FF;
int min = dataWord1 & 0x000000FF;
int hour = ( dataWord2 >> 24 ) & 0x000000FF;
int day = ( dataWord2 >> 8 ) & 0x000000FF;
int mon = dataWord2 & 0x000000FF;
if ( piranha_simulator )
return;
BCD_TO_BIN(sec);
BCD_TO_BIN(min);
BCD_TO_BIN(hour);
BCD_TO_BIN(day);
BCD_TO_BIN(mon);
BCD_TO_BIN(year1);
BCD_TO_BIN(year2);
year = year1 * 100 + year2;
time = mktime(year, mon, day, hour, min, sec);
time += ( jiffies / HZ );
/* Now THIS is a nasty hack!
* It ensures that the first two calls get different answers.
* That way the loop in init_time (time.c) will not think
* the clock is stuck.
*/
if ( lastsec ) {
time -= lastsec;
--lastsec;
}
to_tm(time, tm);
tm->tm_year -= 1900;
mf_get_boot_rtc(tm);
tm->tm_mon -= 1;
}
#endif
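The removed code decoded the IPL timestamp from packed BCD bytes before handing it to mktime(); mf_get_boot_rtc() now does that work. For reference, a minimal sketch of the BCD-to-binary step it relied on, using the usual kernel macro definition (shown here only as an illustration):

#define BCD_TO_BIN(val) ((val) = ((val) & 0x0f) + ((val) >> 4) * 10)

static int bcd_example(void)
{
	u8 sec = 0x59;		/* BCD-encoded "59" */

	BCD_TO_BIN(sec);	/* sec is now 59 decimal */
	return sec;
}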

View file

@ -515,6 +515,7 @@ void __init time_init(void)
do_gtod.varp = &do_gtod.vars[0];
do_gtod.var_idx = 0;
do_gtod.varp->tb_orig_stamp = tb_last_stamp;
get_paca()->next_jiffy_update_tb = tb_last_stamp + tb_ticks_per_jiffy;
do_gtod.varp->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
do_gtod.varp->tb_to_xs = tb_to_xs;

View file

@ -383,6 +383,17 @@ static void __init process_switch(char c)
/* Use PROM debug console. */
register_console(&prom_debug_console);
break;
case 'P':
/* Force UltraSPARC-III P-Cache on. */
if (tlb_type != cheetah) {
printk("BOOT: Ignoring P-Cache force option.\n");
break;
}
cheetah_pcache_forced_on = 1;
add_taint(TAINT_MACHINE_CHECK);
cheetah_enable_pcache();
break;
default:
printk("Unknown boot switch (-%c)\n", c);
break;

View file

@ -123,6 +123,9 @@ void __init smp_callin(void)
smp_setup_percpu_timer();
if (cheetah_pcache_forced_on)
cheetah_enable_pcache();
local_irq_enable();
calibrate_delay();

View file

@ -421,6 +421,25 @@ asmlinkage void cee_log(unsigned long ce_status,
}
}
int cheetah_pcache_forced_on;
void cheetah_enable_pcache(void)
{
unsigned long dcr;
printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
smp_processor_id());
__asm__ __volatile__("ldxa [%%g0] %1, %0"
: "=r" (dcr)
: "i" (ASI_DCU_CONTROL_REG));
dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
"membar #Sync"
: /* no outputs */
: "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}
/* Cheetah error trap handling. */
static unsigned long ecache_flush_physbase;
static unsigned long ecache_flush_linesize;

View file

@ -332,10 +332,12 @@ void handle_BUG(struct pt_regs *regs)
printk(KERN_ALERT "Kernel BUG at %.50s:%d\n", f.filename, f.line);
}
#ifdef CONFIG_BUG
void out_of_line_bug(void)
{
BUG();
}
#endif
static DEFINE_SPINLOCK(die_lock);
static int die_owner = -1;

View file

@ -193,8 +193,9 @@ EXPORT_SYMBOL(smp_num_siblings);
extern void do_softirq_thunk(void);
EXPORT_SYMBOL(do_softirq_thunk);
void out_of_line_bug(void);
#ifdef CONFIG_BUG
EXPORT_SYMBOL(out_of_line_bug);
#endif
EXPORT_SYMBOL(init_level4_pgt);
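In the tree the definition (traps.c) and the export (x8664_ksyms.c) live in separate files, so both sites need the same CONFIG_BUG guard; a condensed single-file sketch of the pairing, for illustration only:

#ifdef CONFIG_BUG
void out_of_line_bug(void)
{
	BUG();
}
EXPORT_SYMBOL(out_of_line_bug);
#endif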

View file

@ -520,7 +520,7 @@ MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
" interface. Other values will set the major device number"
" to that value.");
static struct class *ipmi_class;
static struct class_simple *ipmi_class;
static void ipmi_new_smi(int if_num)
{
@ -534,7 +534,7 @@ static void ipmi_new_smi(int if_num)
static void ipmi_smi_gone(int if_num)
{
class_simple_device_remove(ipmi_class, MKDEV(ipmi_major, if_num));
class_simple_device_remove(MKDEV(ipmi_major, if_num));
devfs_remove("ipmidev/%d", if_num);
}

View file

@ -1933,7 +1933,7 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
/*
* check if dma is safe
*/
if ((rq->data_len & mask) || (addr & mask))
if ((rq->data_len & 3) || (addr & mask))
info->dma = 0;
}
@ -3255,16 +3255,12 @@ sector_t ide_cdrom_capacity (ide_drive_t *drive)
return capacity * sectors_per_frame;
}
static
int ide_cdrom_cleanup(ide_drive_t *drive)
static int ide_cd_remove(struct device *dev)
{
ide_drive_t *drive = to_ide_device(dev);
struct cdrom_info *info = drive->driver_data;
if (ide_unregister_subdriver(drive)) {
printk(KERN_ERR "%s: %s: failed to ide_unregister_subdriver\n",
__FUNCTION__, drive->name);
return 1;
}
ide_unregister_subdriver(drive, info->driver);
del_gendisk(info->disk);
@ -3297,7 +3293,7 @@ static void ide_cd_release(struct kref *kref)
kfree(info);
}
static int ide_cdrom_attach (ide_drive_t *drive);
static int ide_cd_probe(struct device *);
#ifdef CONFIG_PROC_FS
static int proc_idecd_read_capacity
@ -3320,19 +3316,20 @@ static ide_proc_entry_t idecd_proc[] = {
static ide_driver_t ide_cdrom_driver = {
.owner = THIS_MODULE,
.name = "ide-cdrom",
.gen_driver = {
.name = "ide-cdrom",
.bus = &ide_bus_type,
.probe = ide_cd_probe,
.remove = ide_cd_remove,
},
.version = IDECD_VERSION,
.media = ide_cdrom,
.busy = 0,
.supports_dsc_overlap = 1,
.cleanup = ide_cdrom_cleanup,
.do_request = ide_do_rw_cdrom,
.end_request = ide_end_request,
.error = __ide_error,
.abort = __ide_abort,
.proc = idecd_proc,
.attach = ide_cdrom_attach,
.drives = LIST_HEAD_INIT(ide_cdrom_driver.drives),
};
static int idecd_open(struct inode * inode, struct file * file)
@ -3418,8 +3415,9 @@ static char *ignore = NULL;
module_param(ignore, charp, 0400);
MODULE_DESCRIPTION("ATAPI CD-ROM Driver");
static int ide_cdrom_attach (ide_drive_t *drive)
static int ide_cd_probe(struct device *dev)
{
ide_drive_t *drive = to_ide_device(dev);
struct cdrom_info *info;
struct gendisk *g;
struct request_sense sense;
@ -3453,11 +3451,8 @@ static int ide_cdrom_attach (ide_drive_t *drive)
ide_init_disk(g, drive);
if (ide_register_subdriver(drive, &ide_cdrom_driver)) {
printk(KERN_ERR "%s: Failed to register the driver with ide.c\n",
drive->name);
goto out_put_disk;
}
ide_register_subdriver(drive, &ide_cdrom_driver);
memset(info, 0, sizeof (struct cdrom_info));
kref_init(&info->kref);
@ -3470,7 +3465,6 @@ static int ide_cdrom_attach (ide_drive_t *drive)
drive->driver_data = info;
DRIVER(drive)->busy++;
g->minors = 1;
snprintf(g->devfs_name, sizeof(g->devfs_name),
"%s/cd", drive->devfs_name);
@ -3478,8 +3472,7 @@ static int ide_cdrom_attach (ide_drive_t *drive)
g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE;
if (ide_cdrom_setup(drive)) {
struct cdrom_device_info *devinfo = &info->devinfo;
DRIVER(drive)->busy--;
ide_unregister_subdriver(drive);
ide_unregister_subdriver(drive, &ide_cdrom_driver);
if (info->buffer != NULL)
kfree(info->buffer);
if (info->toc != NULL)
@ -3492,7 +3485,6 @@ static int ide_cdrom_attach (ide_drive_t *drive)
drive->driver_data = NULL;
goto failed;
}
DRIVER(drive)->busy--;
cdrom_read_toc(drive, &sense);
g->fops = &idecd_ops;
@ -3500,23 +3492,20 @@ static int ide_cdrom_attach (ide_drive_t *drive)
add_disk(g);
return 0;
out_put_disk:
put_disk(g);
out_free_cd:
kfree(info);
failed:
return 1;
return -ENODEV;
}
static void __exit ide_cdrom_exit(void)
{
ide_unregister_driver(&ide_cdrom_driver);
driver_unregister(&ide_cdrom_driver.gen_driver);
}
static int ide_cdrom_init(void)
{
ide_register_driver(&ide_cdrom_driver);
return 0;
return driver_register(&ide_cdrom_driver.gen_driver);
}
module_init(ide_cdrom_init);
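This is the first of several IDE subdrivers converted from the old attach/cleanup hooks to the generic driver model; ide-disk, ide-floppy and ide-tape below follow the same shape. A minimal sketch of the new registration pattern, with illustrative names and assuming the 2.6.12-era driver core and <linux/ide.h>:

static int ide_foo_probe(struct device *);
static int ide_foo_remove(struct device *);

static ide_driver_t ide_foo_driver = {
	.owner		= THIS_MODULE,
	.gen_driver = {
		.name	= "ide-foo",
		.bus	= &ide_bus_type,
		.probe	= ide_foo_probe,
		.remove	= ide_foo_remove,
	},
	/* .version, .media and the request/end_request hooks as before */
};

static int ide_foo_probe(struct device *dev)
{
	ide_drive_t *drive = to_ide_device(dev);

	/* claim the drive; ide_register_subdriver() no longer returns a status */
	ide_register_subdriver(drive, &ide_foo_driver);
	return 0;			/* or -ENODEV when the drive is not ours */
}

static int ide_foo_remove(struct device *dev)
{
	ide_drive_t *drive = to_ide_device(dev);

	ide_unregister_subdriver(drive, &ide_foo_driver);
	return 0;
}

static int __init ide_foo_init(void)
{
	/* ide_register_driver() becomes a plain driver-core registration */
	return driver_register(&ide_foo_driver.gen_driver);
}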

View file

@ -1024,14 +1024,16 @@ static void ide_cacheflush_p(ide_drive_t *drive)
printk(KERN_INFO "%s: wcache flush failed!\n", drive->name);
}
static int idedisk_cleanup (ide_drive_t *drive)
static int ide_disk_remove(struct device *dev)
{
ide_drive_t *drive = to_ide_device(dev);
struct ide_disk_obj *idkp = drive->driver_data;
struct gendisk *g = idkp->disk;
ide_cacheflush_p(drive);
if (ide_unregister_subdriver(drive))
return 1;
ide_unregister_subdriver(drive, idkp->driver);
del_gendisk(g);
ide_disk_put(idkp);
@ -1052,7 +1054,7 @@ static void ide_disk_release(struct kref *kref)
kfree(idkp);
}
static int idedisk_attach(ide_drive_t *drive);
static int ide_disk_probe(struct device *dev);
static void ide_device_shutdown(struct device *dev)
{
@ -1082,27 +1084,23 @@ static void ide_device_shutdown(struct device *dev)
dev->bus->suspend(dev, PMSG_SUSPEND);
}
/*
* IDE subdriver functions, registered with ide.c
*/
static ide_driver_t idedisk_driver = {
.owner = THIS_MODULE,
.gen_driver = {
.name = "ide-disk",
.bus = &ide_bus_type,
.probe = ide_disk_probe,
.remove = ide_disk_remove,
.shutdown = ide_device_shutdown,
},
.name = "ide-disk",
.version = IDEDISK_VERSION,
.media = ide_disk,
.busy = 0,
.supports_dsc_overlap = 0,
.cleanup = idedisk_cleanup,
.do_request = ide_do_rw_disk,
.end_request = ide_end_request,
.error = __ide_error,
.abort = __ide_abort,
.proc = idedisk_proc,
.attach = idedisk_attach,
.drives = LIST_HEAD_INIT(idedisk_driver.drives),
};
static int idedisk_open(struct inode *inode, struct file *filp)
@ -1199,8 +1197,9 @@ static struct block_device_operations idedisk_ops = {
MODULE_DESCRIPTION("ATA DISK Driver");
static int idedisk_attach(ide_drive_t *drive)
static int ide_disk_probe(struct device *dev)
{
ide_drive_t *drive = to_ide_device(dev);
struct ide_disk_obj *idkp;
struct gendisk *g;
@ -1222,10 +1221,7 @@ static int idedisk_attach(ide_drive_t *drive)
ide_init_disk(g, drive);
if (ide_register_subdriver(drive, &idedisk_driver)) {
printk (KERN_ERR "ide-disk: %s: Failed to register the driver with ide.c\n", drive->name);
goto out_put_disk;
}
ide_register_subdriver(drive, &idedisk_driver);
memset(idkp, 0, sizeof(*idkp));
@ -1239,7 +1235,6 @@ static int idedisk_attach(ide_drive_t *drive)
drive->driver_data = idkp;
DRIVER(drive)->busy++;
idedisk_setup(drive);
if ((!drive->head || drive->head > 16) && !drive->select.b.lba) {
printk(KERN_ERR "%s: INVALID GEOMETRY: %d PHYSICAL HEADS?\n",
@ -1247,7 +1242,7 @@ static int idedisk_attach(ide_drive_t *drive)
drive->attach = 0;
} else
drive->attach = 1;
DRIVER(drive)->busy--;
g->minors = 1 << PARTN_BITS;
strcpy(g->devfs_name, drive->devfs_name);
g->driverfs_dev = &drive->gendev;
@ -1257,22 +1252,20 @@ static int idedisk_attach(ide_drive_t *drive)
add_disk(g);
return 0;
out_put_disk:
put_disk(g);
out_free_idkp:
kfree(idkp);
failed:
return 1;
return -ENODEV;
}
static void __exit idedisk_exit (void)
{
ide_unregister_driver(&idedisk_driver);
driver_unregister(&idedisk_driver.gen_driver);
}
static int idedisk_init (void)
{
return ide_register_driver(&idedisk_driver);
return driver_register(&idedisk_driver.gen_driver);
}
module_init(idedisk_init);

View file

@ -1865,13 +1865,13 @@ static void idefloppy_setup (ide_drive_t *drive, idefloppy_floppy_t *floppy)
idefloppy_add_settings(drive);
}
static int idefloppy_cleanup (ide_drive_t *drive)
static int ide_floppy_remove(struct device *dev)
{
ide_drive_t *drive = to_ide_device(dev);
idefloppy_floppy_t *floppy = drive->driver_data;
struct gendisk *g = floppy->disk;
if (ide_unregister_subdriver(drive))
return 1;
ide_unregister_subdriver(drive, floppy->driver);
del_gendisk(g);
@ -1916,26 +1916,24 @@ static ide_proc_entry_t idefloppy_proc[] = {
#endif /* CONFIG_PROC_FS */
static int idefloppy_attach(ide_drive_t *drive);
static int ide_floppy_probe(struct device *);
/*
* IDE subdriver functions, registered with ide.c
*/
static ide_driver_t idefloppy_driver = {
.owner = THIS_MODULE,
.name = "ide-floppy",
.gen_driver = {
.name = "ide-floppy",
.bus = &ide_bus_type,
.probe = ide_floppy_probe,
.remove = ide_floppy_remove,
},
.version = IDEFLOPPY_VERSION,
.media = ide_floppy,
.busy = 0,
.supports_dsc_overlap = 0,
.cleanup = idefloppy_cleanup,
.do_request = idefloppy_do_request,
.end_request = idefloppy_do_end_request,
.error = __ide_error,
.abort = __ide_abort,
.proc = idefloppy_proc,
.attach = idefloppy_attach,
.drives = LIST_HEAD_INIT(idefloppy_driver.drives),
};
static int idefloppy_open(struct inode *inode, struct file *filp)
@ -2122,8 +2120,9 @@ static struct block_device_operations idefloppy_ops = {
.revalidate_disk= idefloppy_revalidate_disk
};
static int idefloppy_attach (ide_drive_t *drive)
static int ide_floppy_probe(struct device *dev)
{
ide_drive_t *drive = to_ide_device(dev);
idefloppy_floppy_t *floppy;
struct gendisk *g;
@ -2152,10 +2151,7 @@ static int idefloppy_attach (ide_drive_t *drive)
ide_init_disk(g, drive);
if (ide_register_subdriver(drive, &idefloppy_driver)) {
printk (KERN_ERR "ide-floppy: %s: Failed to register the driver with ide.c\n", drive->name);
goto out_put_disk;
}
ide_register_subdriver(drive, &idefloppy_driver);
memset(floppy, 0, sizeof(*floppy));
@ -2169,9 +2165,8 @@ static int idefloppy_attach (ide_drive_t *drive)
drive->driver_data = floppy;
DRIVER(drive)->busy++;
idefloppy_setup (drive, floppy);
DRIVER(drive)->busy--;
g->minors = 1 << PARTN_BITS;
g->driverfs_dev = &drive->gendev;
strcpy(g->devfs_name, drive->devfs_name);
@ -2181,19 +2176,17 @@ static int idefloppy_attach (ide_drive_t *drive)
add_disk(g);
return 0;
out_put_disk:
put_disk(g);
out_free_floppy:
kfree(floppy);
failed:
return 1;
return -ENODEV;
}
MODULE_DESCRIPTION("ATAPI FLOPPY Driver");
static void __exit idefloppy_exit (void)
{
ide_unregister_driver(&idefloppy_driver);
driver_unregister(&idefloppy_driver.gen_driver);
}
/*
@ -2202,8 +2195,7 @@ static void __exit idefloppy_exit (void)
static int idefloppy_init (void)
{
printk("ide-floppy driver " IDEFLOPPY_VERSION "\n");
ide_register_driver(&idefloppy_driver);
return 0;
return driver_register(&idefloppy_driver.gen_driver);
}
module_init(idefloppy_init);

View file

@ -47,6 +47,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/pci.h>
@ -696,13 +697,13 @@ static int wait_hwif_ready(ide_hwif_t *hwif)
SELECT_DRIVE(&hwif->drives[0]);
hwif->OUTB(8, hwif->io_ports[IDE_CONTROL_OFFSET]);
mdelay(2);
rc = ide_wait_not_busy(hwif, 10000);
rc = ide_wait_not_busy(hwif, 35000);
if (rc)
return rc;
SELECT_DRIVE(&hwif->drives[1]);
hwif->OUTB(8, hwif->io_ports[IDE_CONTROL_OFFSET]);
mdelay(2);
rc = ide_wait_not_busy(hwif, 10000);
rc = ide_wait_not_busy(hwif, 35000);
/* Exit function with master reselected (let's be sane) */
SELECT_DRIVE(&hwif->drives[0]);
@ -918,7 +919,7 @@ int probe_hwif_init_with_fixup(ide_hwif_t *hwif, void (*fixup)(ide_hwif_t *hwif)
want them on default or a new "empty" class
for hotplug reprobing ? */
if (drive->present) {
ata_attach(drive);
device_register(&drive->gendev);
}
}
}
@ -1279,10 +1280,51 @@ void ide_init_disk(struct gendisk *disk, ide_drive_t *drive)
EXPORT_SYMBOL_GPL(ide_init_disk);
static void ide_remove_drive_from_hwgroup(ide_drive_t *drive)
{
ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
if (drive == drive->next) {
/* special case: last drive from hwgroup. */
BUG_ON(hwgroup->drive != drive);
hwgroup->drive = NULL;
} else {
ide_drive_t *walk;
walk = hwgroup->drive;
while (walk->next != drive)
walk = walk->next;
walk->next = drive->next;
if (hwgroup->drive == drive) {
hwgroup->drive = drive->next;
hwgroup->hwif = hwgroup->drive->hwif;
}
}
BUG_ON(hwgroup->drive == drive);
}
static void drive_release_dev (struct device *dev)
{
ide_drive_t *drive = container_of(dev, ide_drive_t, gendev);
spin_lock_irq(&ide_lock);
if (drive->devfs_name[0] != '\0') {
devfs_remove(drive->devfs_name);
drive->devfs_name[0] = '\0';
}
ide_remove_drive_from_hwgroup(drive);
if (drive->id != NULL) {
kfree(drive->id);
drive->id = NULL;
}
drive->present = 0;
/* Messed up locking ... */
spin_unlock_irq(&ide_lock);
blk_cleanup_queue(drive->queue);
spin_lock_irq(&ide_lock);
drive->queue = NULL;
spin_unlock_irq(&ide_lock);
up(&drive->gendev_rel_sem);
}
@ -1306,7 +1348,6 @@ static void init_gendisk (ide_hwif_t *hwif)
drive->gendev.driver_data = drive;
drive->gendev.release = drive_release_dev;
if (drive->present) {
device_register(&drive->gendev);
sprintf(drive->devfs_name, "ide/host%d/bus%d/target%d/lun%d",
(hwif->channel && hwif->mate) ?
hwif->mate->index : hwif->index,
@ -1412,7 +1453,7 @@ int ideprobe_init (void)
hwif->chipset = ide_generic;
for (unit = 0; unit < MAX_DRIVES; ++unit)
if (hwif->drives[unit].present)
ata_attach(&hwif->drives[unit]);
device_register(&hwif->drives[unit].gendev);
}
}
return 0;

View file

@ -307,17 +307,41 @@ static int proc_ide_read_driver
(char *page, char **start, off_t off, int count, int *eof, void *data)
{
ide_drive_t *drive = (ide_drive_t *) data;
ide_driver_t *driver = drive->driver;
struct device *dev = &drive->gendev;
ide_driver_t *ide_drv;
int len;
if (driver) {
down_read(&dev->bus->subsys.rwsem);
if (dev->driver) {
ide_drv = container_of(dev->driver, ide_driver_t, gen_driver);
len = sprintf(page, "%s version %s\n",
driver->name, driver->version);
dev->driver->name, ide_drv->version);
} else
len = sprintf(page, "ide-default version 0.9.newide\n");
up_read(&dev->bus->subsys.rwsem);
PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
}
static int ide_replace_subdriver(ide_drive_t *drive, const char *driver)
{
struct device *dev = &drive->gendev;
int ret = 1;
down_write(&dev->bus->subsys.rwsem);
device_release_driver(dev);
/* FIXME: device can still be in use by previous driver */
strlcpy(drive->driver_req, driver, sizeof(drive->driver_req));
device_attach(dev);
drive->driver_req[0] = 0;
if (dev->driver == NULL)
device_attach(dev);
if (dev->driver && !strcmp(dev->driver->name, driver))
ret = 0;
up_write(&dev->bus->subsys.rwsem);
return ret;
}
static int proc_ide_write_driver
(struct file *file, const char __user *buffer, unsigned long count, void *data)
{
@ -488,16 +512,32 @@ void destroy_proc_ide_interface(ide_hwif_t *hwif)
}
}
extern struct seq_operations ide_drivers_op;
static int proc_print_driver(struct device_driver *drv, void *data)
{
ide_driver_t *ide_drv = container_of(drv, ide_driver_t, gen_driver);
struct seq_file *s = data;
seq_printf(s, "%s version %s\n", drv->name, ide_drv->version);
return 0;
}
static int ide_drivers_show(struct seq_file *s, void *p)
{
bus_for_each_drv(&ide_bus_type, NULL, s, proc_print_driver);
return 0;
}
static int ide_drivers_open(struct inode *inode, struct file *file)
{
return seq_open(file, &ide_drivers_op);
return single_open(file, &ide_drivers_show, NULL);
}
static struct file_operations ide_drivers_operations = {
.open = ide_drivers_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
.release = single_release,
};
void proc_ide_create(void)
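The /proc/ide/drivers file switches from a hand-rolled seq_operations iterator to the simpler single_open() form, where one show() callback emits the whole file. A generic sketch of that pattern, with hypothetical names and assuming <linux/seq_file.h>:

static int foo_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "hello\n");
	return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
	/* no iterator state needed: single_open() buffers one full show() pass */
	return single_open(file, foo_show, NULL);
}

static struct file_operations foo_fops = {
	.open		= foo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};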

View file

@ -4681,21 +4681,12 @@ static void idetape_setup (ide_drive_t *drive, idetape_tape_t *tape, int minor)
idetape_add_settings(drive);
}
static int idetape_cleanup (ide_drive_t *drive)
static int ide_tape_remove(struct device *dev)
{
ide_drive_t *drive = to_ide_device(dev);
idetape_tape_t *tape = drive->driver_data;
unsigned long flags;
spin_lock_irqsave(&ide_lock, flags);
if (test_bit(IDETAPE_BUSY, &tape->flags) || drive->usage ||
tape->first_stage != NULL || tape->merge_stage_size) {
spin_unlock_irqrestore(&ide_lock, flags);
return 1;
}
spin_unlock_irqrestore(&ide_lock, flags);
DRIVER(drive)->busy = 0;
(void) ide_unregister_subdriver(drive);
ide_unregister_subdriver(drive, tape->driver);
ide_unregister_region(tape->disk);
@ -4710,6 +4701,8 @@ static void ide_tape_release(struct kref *kref)
ide_drive_t *drive = tape->drive;
struct gendisk *g = tape->disk;
BUG_ON(tape->first_stage != NULL || tape->merge_stage_size);
drive->dsc_overlap = 0;
drive->driver_data = NULL;
devfs_remove("%s/mt", drive->devfs_name);
@ -4747,26 +4740,24 @@ static ide_proc_entry_t idetape_proc[] = {
#endif
static int idetape_attach(ide_drive_t *drive);
static int ide_tape_probe(struct device *);
/*
* IDE subdriver functions, registered with ide.c
*/
static ide_driver_t idetape_driver = {
.owner = THIS_MODULE,
.name = "ide-tape",
.gen_driver = {
.name = "ide-tape",
.bus = &ide_bus_type,
.probe = ide_tape_probe,
.remove = ide_tape_remove,
},
.version = IDETAPE_VERSION,
.media = ide_tape,
.busy = 1,
.supports_dsc_overlap = 1,
.cleanup = idetape_cleanup,
.do_request = idetape_do_request,
.end_request = idetape_end_request,
.error = __ide_error,
.abort = __ide_abort,
.proc = idetape_proc,
.attach = idetape_attach,
.drives = LIST_HEAD_INIT(idetape_driver.drives),
};
/*
@ -4829,8 +4820,9 @@ static struct block_device_operations idetape_block_ops = {
.ioctl = idetape_ioctl,
};
static int idetape_attach (ide_drive_t *drive)
static int ide_tape_probe(struct device *dev)
{
ide_drive_t *drive = to_ide_device(dev);
idetape_tape_t *tape;
struct gendisk *g;
int minor;
@ -4865,10 +4857,7 @@ static int idetape_attach (ide_drive_t *drive)
ide_init_disk(g, drive);
if (ide_register_subdriver(drive, &idetape_driver)) {
printk(KERN_ERR "ide-tape: %s: Failed to register the driver with ide.c\n", drive->name);
goto out_put_disk;
}
ide_register_subdriver(drive, &idetape_driver);
memset(tape, 0, sizeof(*tape));
@ -4902,12 +4891,11 @@ static int idetape_attach (ide_drive_t *drive)
ide_register_region(g);
return 0;
out_put_disk:
put_disk(g);
out_free_tape:
kfree(tape);
failed:
return 1;
return -ENODEV;
}
MODULE_DESCRIPTION("ATAPI Streaming TAPE Driver");
@ -4915,7 +4903,7 @@ MODULE_LICENSE("GPL");
static void __exit idetape_exit (void)
{
ide_unregister_driver(&idetape_driver);
driver_unregister(&idetape_driver.gen_driver);
unregister_chrdev(IDETAPE_MAJOR, "ht");
}
@ -4928,8 +4916,7 @@ static int idetape_init (void)
printk(KERN_ERR "ide-tape: Failed to register character device interface\n");
return -EBUSY;
}
ide_register_driver(&idetape_driver);
return 0;
return driver_register(&idetape_driver.gen_driver);
}
module_init(idetape_init);

View file

@ -196,8 +196,6 @@ ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */
EXPORT_SYMBOL(ide_hwifs);
static struct list_head ide_drives = LIST_HEAD_INIT(ide_drives);
/*
* Do not even *think* about calling this!
*/
@ -358,54 +356,6 @@ static int ide_system_bus_speed(void)
return system_bus_speed;
}
/*
* drives_lock protects the list of drives, drivers_lock the
* list of drivers. Currently nobody takes both at once.
*/
static DEFINE_SPINLOCK(drives_lock);
static DEFINE_SPINLOCK(drivers_lock);
static LIST_HEAD(drivers);
/* Iterator for the driver list. */
static void *m_start(struct seq_file *m, loff_t *pos)
{
struct list_head *p;
loff_t l = *pos;
spin_lock(&drivers_lock);
list_for_each(p, &drivers)
if (!l--)
return list_entry(p, ide_driver_t, drivers);
return NULL;
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
struct list_head *p = ((ide_driver_t *)v)->drivers.next;
(*pos)++;
return p==&drivers ? NULL : list_entry(p, ide_driver_t, drivers);
}
static void m_stop(struct seq_file *m, void *v)
{
spin_unlock(&drivers_lock);
}
static int show_driver(struct seq_file *m, void *v)
{
ide_driver_t *driver = v;
seq_printf(m, "%s version %s\n", driver->name, driver->version);
return 0;
}
struct seq_operations ide_drivers_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = show_driver
};
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_ide_root;
#endif
@ -630,7 +580,7 @@ void ide_unregister(unsigned int index)
ide_hwif_t *hwif, *g;
static ide_hwif_t tmp_hwif; /* protected by ide_cfg_sem */
ide_hwgroup_t *hwgroup;
int irq_count = 0, unit, i;
int irq_count = 0, unit;
BUG_ON(index >= MAX_HWIFS);
@ -643,23 +593,22 @@ void ide_unregister(unsigned int index)
goto abort;
for (unit = 0; unit < MAX_DRIVES; ++unit) {
drive = &hwif->drives[unit];
if (!drive->present)
if (!drive->present) {
if (drive->devfs_name[0] != '\0') {
devfs_remove(drive->devfs_name);
drive->devfs_name[0] = '\0';
}
continue;
if (drive->usage || DRIVER(drive)->busy)
goto abort;
drive->dead = 1;
}
spin_unlock_irq(&ide_lock);
device_unregister(&drive->gendev);
down(&drive->gendev_rel_sem);
spin_lock_irq(&ide_lock);
}
hwif->present = 0;
spin_unlock_irq(&ide_lock);
for (unit = 0; unit < MAX_DRIVES; ++unit) {
drive = &hwif->drives[unit];
if (!drive->present)
continue;
DRIVER(drive)->cleanup(drive);
}
destroy_proc_ide_interface(hwif);
hwgroup = hwif->hwgroup;
@ -687,44 +636,6 @@ void ide_unregister(unsigned int index)
* Remove us from the hwgroup, and free
* the hwgroup if we were the only member
*/
for (i = 0; i < MAX_DRIVES; ++i) {
drive = &hwif->drives[i];
if (drive->devfs_name[0] != '\0') {
devfs_remove(drive->devfs_name);
drive->devfs_name[0] = '\0';
}
if (!drive->present)
continue;
if (drive == drive->next) {
/* special case: last drive from hwgroup. */
BUG_ON(hwgroup->drive != drive);
hwgroup->drive = NULL;
} else {
ide_drive_t *walk;
walk = hwgroup->drive;
while (walk->next != drive)
walk = walk->next;
walk->next = drive->next;
if (hwgroup->drive == drive) {
hwgroup->drive = drive->next;
hwgroup->hwif = HWIF(hwgroup->drive);
}
}
BUG_ON(hwgroup->drive == drive);
if (drive->id != NULL) {
kfree(drive->id);
drive->id = NULL;
}
drive->present = 0;
/* Messed up locking ... */
spin_unlock_irq(&ide_lock);
blk_cleanup_queue(drive->queue);
device_unregister(&drive->gendev);
down(&drive->gendev_rel_sem);
spin_lock_irq(&ide_lock);
drive->queue = NULL;
}
if (hwif->next == hwif) {
BUG_ON(hwgroup->hwif != hwif);
kfree(hwgroup);
@ -1304,73 +1215,6 @@ int system_bus_clock (void)
EXPORT_SYMBOL(system_bus_clock);
/*
* Locking is badly broken here - since way back. That sucker is
* root-only, but that's not an excuse... The real question is what
* exclusion rules do we want here.
*/
int ide_replace_subdriver (ide_drive_t *drive, const char *driver)
{
if (!drive->present || drive->usage || drive->dead)
goto abort;
if (DRIVER(drive)->cleanup(drive))
goto abort;
strlcpy(drive->driver_req, driver, sizeof(drive->driver_req));
if (ata_attach(drive)) {
spin_lock(&drives_lock);
list_del_init(&drive->list);
spin_unlock(&drives_lock);
drive->driver_req[0] = 0;
ata_attach(drive);
} else {
drive->driver_req[0] = 0;
}
if (drive->driver && !strcmp(drive->driver->name, driver))
return 0;
abort:
return 1;
}
/**
* ata_attach - attach an ATA/ATAPI device
* @drive: drive to attach
*
* Takes a drive that is as yet not assigned to any midlayer IDE
* driver (or is assigned to the default driver) and figures out
* which driver would like to own it. If nobody claims the drive
* then it is automatically attached to the default driver used for
* unclaimed objects.
*
* A return of zero indicates attachment to a driver, of one
* attachment to the default driver.
*
* Takes drivers_lock.
*/
int ata_attach(ide_drive_t *drive)
{
struct list_head *p;
spin_lock(&drivers_lock);
list_for_each(p, &drivers) {
ide_driver_t *driver = list_entry(p, ide_driver_t, drivers);
if (!try_module_get(driver->owner))
continue;
spin_unlock(&drivers_lock);
if (driver->attach(drive) == 0) {
module_put(driver->owner);
drive->gendev.driver = &driver->gen_driver;
return 0;
}
spin_lock(&drivers_lock);
module_put(driver->owner);
}
drive->gendev.driver = NULL;
spin_unlock(&drivers_lock);
if (ide_register_subdriver(drive, NULL))
panic("ide: default attach failed");
return 1;
}
static int generic_ide_suspend(struct device *dev, pm_message_t state)
{
ide_drive_t *drive = dev->driver_data;
@ -2013,27 +1857,11 @@ static void __init probe_for_hwifs (void)
#endif
}
int ide_register_subdriver(ide_drive_t *drive, ide_driver_t *driver)
void ide_register_subdriver(ide_drive_t *drive, ide_driver_t *driver)
{
unsigned long flags;
spin_lock_irqsave(&ide_lock, flags);
if (!drive->present || drive->driver != NULL ||
drive->usage || drive->dead) {
spin_unlock_irqrestore(&ide_lock, flags);
return 1;
}
drive->driver = driver;
spin_unlock_irqrestore(&ide_lock, flags);
spin_lock(&drives_lock);
list_add_tail(&drive->list, driver ? &driver->drives : &ide_drives);
spin_unlock(&drives_lock);
// printk(KERN_INFO "%s: attached %s driver.\n", drive->name, driver->name);
#ifdef CONFIG_PROC_FS
if (driver)
ide_add_proc_entries(drive->proc, driver->proc, drive);
ide_add_proc_entries(drive->proc, driver->proc, drive);
#endif
return 0;
}
EXPORT_SYMBOL(ide_register_subdriver);
@ -2041,136 +1869,51 @@ EXPORT_SYMBOL(ide_register_subdriver);
/**
* ide_unregister_subdriver - disconnect drive from driver
* @drive: drive to unplug
* @driver: driver
*
* Disconnect a drive from the driver it was attached to and then
* clean up the various proc files and other objects attached to it.
*
* Takes ide_setting_sem, ide_lock and drives_lock.
* Takes ide_setting_sem and ide_lock.
* Caller must hold none of the locks.
*
* No locking versus subdriver unload because we are moving to the
* default driver anyway. Wants double checking.
*/
int ide_unregister_subdriver (ide_drive_t *drive)
void ide_unregister_subdriver(ide_drive_t *drive, ide_driver_t *driver)
{
unsigned long flags;
down(&ide_setting_sem);
spin_lock_irqsave(&ide_lock, flags);
if (drive->usage || drive->driver == NULL || DRIVER(drive)->busy) {
spin_unlock_irqrestore(&ide_lock, flags);
up(&ide_setting_sem);
return 1;
}
#ifdef CONFIG_PROC_FS
ide_remove_proc_entries(drive->proc, DRIVER(drive)->proc);
ide_remove_proc_entries(drive->proc, driver->proc);
#endif
auto_remove_settings(drive);
drive->driver = NULL;
spin_unlock_irqrestore(&ide_lock, flags);
up(&ide_setting_sem);
spin_lock(&drives_lock);
list_del_init(&drive->list);
spin_unlock(&drives_lock);
/* drive will be added to &ide_drives in ata_attach() */
return 0;
}
EXPORT_SYMBOL(ide_unregister_subdriver);
static int ide_drive_remove(struct device * dev)
{
ide_drive_t * drive = container_of(dev,ide_drive_t,gendev);
DRIVER(drive)->cleanup(drive);
return 0;
}
/**
* ide_register_driver - register IDE device driver
* @driver: the IDE device driver
*
* Register a new device driver and then scan the devices
* on the IDE bus in case any should be attached to the
* driver we have just registered. If so attach them.
*
* Takes drivers_lock and drives_lock.
*/
int ide_register_driver(ide_driver_t *driver)
{
struct list_head list;
struct list_head *list_loop;
struct list_head *tmp_storage;
spin_lock(&drivers_lock);
list_add(&driver->drivers, &drivers);
spin_unlock(&drivers_lock);
INIT_LIST_HEAD(&list);
spin_lock(&drives_lock);
list_splice_init(&ide_drives, &list);
spin_unlock(&drives_lock);
list_for_each_safe(list_loop, tmp_storage, &list) {
ide_drive_t *drive = container_of(list_loop, ide_drive_t, list);
list_del_init(&drive->list);
if (drive->present)
ata_attach(drive);
}
driver->gen_driver.name = (char *) driver->name;
driver->gen_driver.bus = &ide_bus_type;
driver->gen_driver.remove = ide_drive_remove;
return driver_register(&driver->gen_driver);
}
EXPORT_SYMBOL(ide_register_driver);
/**
* ide_unregister_driver - unregister IDE device driver
* @driver: the IDE device driver
*
* Called when a driver module is being unloaded. We reattach any
* devices to whatever driver claims them next (typically the default
* driver).
*
* Takes drivers_lock and called functions will take ide_setting_sem.
*/
void ide_unregister_driver(ide_driver_t *driver)
{
ide_drive_t *drive;
spin_lock(&drivers_lock);
list_del(&driver->drivers);
spin_unlock(&drivers_lock);
driver_unregister(&driver->gen_driver);
while(!list_empty(&driver->drives)) {
drive = list_entry(driver->drives.next, ide_drive_t, list);
if (driver->cleanup(drive)) {
printk(KERN_ERR "%s: cleanup_module() called while still busy\n", drive->name);
BUG();
}
ata_attach(drive);
}
}
EXPORT_SYMBOL(ide_unregister_driver);
/*
* Probe module
*/
EXPORT_SYMBOL(ide_lock);
static int ide_bus_match(struct device *dev, struct device_driver *drv)
{
return 1;
}
struct bus_type ide_bus_type = {
.name = "ide",
.match = ide_bus_match,
.suspend = generic_ide_suspend,
.resume = generic_ide_resume,
};
EXPORT_SYMBOL_GPL(ide_bus_type);
/*
* This is gets invoked once during initialization, to set *everything* up
*/

View file

@ -587,7 +587,7 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
init_mad(query->sa_query.mad, agent);
query->sa_query.callback = ib_sa_path_rec_callback;
query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
query->sa_query.release = ib_sa_path_rec_release;
query->sa_query.port = port;
query->sa_query.mad->mad_hdr.method = IB_MGMT_METHOD_GET;
@ -663,7 +663,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
init_mad(query->sa_query.mad, agent);
query->sa_query.callback = ib_sa_mcmember_rec_callback;
query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
query->sa_query.release = ib_sa_mcmember_rec_release;
query->sa_query.port = port;
query->sa_query.mad->mad_hdr.method = method;
@ -698,20 +698,21 @@ static void send_handler(struct ib_mad_agent *agent,
if (!query)
return;
switch (mad_send_wc->status) {
case IB_WC_SUCCESS:
/* No callback -- already got recv */
break;
case IB_WC_RESP_TIMEOUT_ERR:
query->callback(query, -ETIMEDOUT, NULL);
break;
case IB_WC_WR_FLUSH_ERR:
query->callback(query, -EINTR, NULL);
break;
default:
query->callback(query, -EIO, NULL);
break;
}
if (query->callback)
switch (mad_send_wc->status) {
case IB_WC_SUCCESS:
/* No callback -- already got recv */
break;
case IB_WC_RESP_TIMEOUT_ERR:
query->callback(query, -ETIMEDOUT, NULL);
break;
case IB_WC_WR_FLUSH_ERR:
query->callback(query, -EINTR, NULL);
break;
default:
query->callback(query, -EIO, NULL);
break;
}
dma_unmap_single(agent->device->dma_device,
pci_unmap_addr(query, mapping),
@ -736,7 +737,7 @@ static void recv_handler(struct ib_mad_agent *mad_agent,
query = idr_find(&query_idr, mad_recv_wc->wc->wr_id);
spin_unlock_irqrestore(&idr_lock, flags);
if (query) {
if (query && query->callback) {
if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
query->callback(query,
mad_recv_wc->recv_buf.mad->mad_hdr.status ?

View file

@ -499,6 +499,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
static int ib_umad_close(struct inode *inode, struct file *filp)
{
struct ib_umad_file *file = filp->private_data;
struct ib_umad_packet *packet, *tmp;
int i;
for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
@ -507,6 +508,9 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
ib_unregister_mad_agent(file->agent[i]);
}
list_for_each_entry_safe(packet, tmp, &file->recv_list, list)
kfree(packet);
kfree(file);
return 0;

View file

@ -147,7 +147,7 @@ struct ib_sa_path_rec {
/* reserved */
u8 sl;
u8 mtu_selector;
enum ib_mtu mtu;
u8 mtu;
u8 rate_selector;
u8 rate;
u8 packet_life_time_selector;
@ -180,7 +180,7 @@ struct ib_sa_mcmember_rec {
u32 qkey;
u16 mlid;
u8 mtu_selector;
enum ib_mtu mtu;
u8 mtu;
u8 traffic_class;
u16 pkey;
u8 rate_selector;

View file

@ -39,15 +39,16 @@
#define MANUAL_MASK 0xe0
#define AUTO_MASK 0x20
static u8 TEMP_REG[3] = {0x26, 0x25, 0x27}; /* local, cpu, gpu */
static u8 LIMIT_REG[3] = {0x6b, 0x6a, 0x6c}; /* local, cpu, gpu */
static u8 TEMP_REG[3] = {0x26, 0x25, 0x27}; /* local, sensor1, sensor2 */
static u8 LIMIT_REG[3] = {0x6b, 0x6a, 0x6c}; /* local, sensor1, sensor2 */
static u8 MANUAL_MODE[2] = {0x5c, 0x5d};
static u8 REM_CONTROL[2] = {0x00, 0x40};
static u8 FAN_SPEED[2] = {0x28, 0x2a};
static u8 FAN_SPD_SET[2] = {0x30, 0x31};
static u8 default_limits_local[3] = {70, 50, 70}; /* local, cpu, gpu */
static u8 default_limits_chip[3] = {80, 65, 80}; /* local, cpu, gpu */
static u8 default_limits_local[3] = {70, 50, 70}; /* local, sensor1, sensor2 */
static u8 default_limits_chip[3] = {80, 65, 80}; /* local, sensor1, sensor2 */
static char *sensor_location[3] = {NULL, NULL, NULL};
static int limit_adjust = 0;
static int fan_speed = -1;
@ -58,7 +59,7 @@ MODULE_DESCRIPTION("Driver for ADT746x thermostat in iBook G4 and "
MODULE_LICENSE("GPL");
module_param(limit_adjust, int, 0644);
MODULE_PARM_DESC(limit_adjust,"Adjust maximum temperatures (50 cpu, 70 gpu) "
MODULE_PARM_DESC(limit_adjust,"Adjust maximum temperatures (50 sensor1, 70 sensor2) "
"by N degrees.");
module_param(fan_speed, int, 0644);
@ -213,10 +214,10 @@ static void write_fan_speed(struct thermostat *th, int speed, int fan)
if (th->last_speed[fan] != speed) {
if (speed == -1)
printk(KERN_DEBUG "adt746x: Setting speed to automatic "
"for %s fan.\n", fan?"GPU":"CPU");
"for %s fan.\n", sensor_location[fan+1]);
else
printk(KERN_DEBUG "adt746x: Setting speed to %d "
"for %s fan.\n", speed, fan?"GPU":"CPU");
"for %s fan.\n", speed, sensor_location[fan+1]);
} else
return;
@ -300,11 +301,11 @@ static void update_fans_speed (struct thermostat *th)
printk(KERN_DEBUG "adt746x: setting fans speed to %d "
"(limit exceeded by %d on %s) \n",
new_speed, var,
fan_number?"GPU/pwr":"CPU");
sensor_location[fan_number+1]);
write_both_fan_speed(th, new_speed);
th->last_var[fan_number] = var;
} else if (var < -2) {
/* don't stop fan if GPU/power is cold and CPU is not
/* don't stop fan if sensor2 is cold and sensor1 is not
* so cold (lastvar >= -1) */
if (i == 2 && lastvar < -1) {
if (th->last_speed[fan_number] != 0)
@ -318,7 +319,7 @@ static void update_fans_speed (struct thermostat *th)
if (started)
return; /* we don't want to re-stop the fan
* if CPU is heating and GPU/power is not */
* if sensor1 is heating and sensor2 is not */
}
}
@ -353,7 +354,7 @@ static int monitor_task(void *arg)
static void set_limit(struct thermostat *th, int i)
{
/* Set CPU limit higher to avoid powerdowns */
/* Set sensor1 limit higher to avoid powerdowns */
th->limits[i] = default_limits_chip[i] + limit_adjust;
write_reg(th, LIMIT_REG[i], th->limits[i]);
@ -461,6 +462,12 @@ static ssize_t show_##name(struct device *dev, char *buf) \
return sprintf(buf, "%d\n", data); \
}
#define BUILD_SHOW_FUNC_STR(name, data) \
static ssize_t show_##name(struct device *dev, char *buf) \
{ \
return sprintf(buf, "%s\n", data); \
}
#define BUILD_SHOW_FUNC_FAN(name, data) \
static ssize_t show_##name(struct device *dev, char *buf) \
{ \
@ -476,7 +483,7 @@ static ssize_t store_##name(struct device *dev, const char *buf, size_t n) \
int val; \
int i; \
val = simple_strtol(buf, NULL, 10); \
printk(KERN_INFO "Adjusting limits by %d°C\n", val); \
printk(KERN_INFO "Adjusting limits by %d degrees\n", val); \
limit_adjust = val; \
for (i=0; i < 3; i++) \
set_limit(thermostat, i); \
@ -495,35 +502,41 @@ static ssize_t store_##name(struct device *dev, const char *buf, size_t n) \
return n; \
}
BUILD_SHOW_FUNC_INT(cpu_temperature, (read_reg(thermostat, TEMP_REG[1])))
BUILD_SHOW_FUNC_INT(gpu_temperature, (read_reg(thermostat, TEMP_REG[2])))
BUILD_SHOW_FUNC_INT(cpu_limit, thermostat->limits[1])
BUILD_SHOW_FUNC_INT(gpu_limit, thermostat->limits[2])
BUILD_SHOW_FUNC_INT(sensor1_temperature, (read_reg(thermostat, TEMP_REG[1])))
BUILD_SHOW_FUNC_INT(sensor2_temperature, (read_reg(thermostat, TEMP_REG[2])))
BUILD_SHOW_FUNC_INT(sensor1_limit, thermostat->limits[1])
BUILD_SHOW_FUNC_INT(sensor2_limit, thermostat->limits[2])
BUILD_SHOW_FUNC_STR(sensor1_location, sensor_location[1])
BUILD_SHOW_FUNC_STR(sensor2_location, sensor_location[2])
BUILD_SHOW_FUNC_INT(specified_fan_speed, fan_speed)
BUILD_SHOW_FUNC_FAN(cpu_fan_speed, 0)
BUILD_SHOW_FUNC_FAN(gpu_fan_speed, 1)
BUILD_SHOW_FUNC_FAN(sensor1_fan_speed, 0)
BUILD_SHOW_FUNC_FAN(sensor2_fan_speed, 1)
BUILD_STORE_FUNC_INT(specified_fan_speed,fan_speed)
BUILD_SHOW_FUNC_INT(limit_adjust, limit_adjust)
BUILD_STORE_FUNC_DEG(limit_adjust, thermostat)
static DEVICE_ATTR(cpu_temperature, S_IRUGO,
show_cpu_temperature,NULL);
static DEVICE_ATTR(gpu_temperature, S_IRUGO,
show_gpu_temperature,NULL);
static DEVICE_ATTR(cpu_limit, S_IRUGO,
show_cpu_limit, NULL);
static DEVICE_ATTR(gpu_limit, S_IRUGO,
show_gpu_limit, NULL);
static DEVICE_ATTR(sensor1_temperature, S_IRUGO,
show_sensor1_temperature,NULL);
static DEVICE_ATTR(sensor2_temperature, S_IRUGO,
show_sensor2_temperature,NULL);
static DEVICE_ATTR(sensor1_limit, S_IRUGO,
show_sensor1_limit, NULL);
static DEVICE_ATTR(sensor2_limit, S_IRUGO,
show_sensor2_limit, NULL);
static DEVICE_ATTR(sensor1_location, S_IRUGO,
show_sensor1_location, NULL);
static DEVICE_ATTR(sensor2_location, S_IRUGO,
show_sensor2_location, NULL);
static DEVICE_ATTR(specified_fan_speed, S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH,
show_specified_fan_speed,store_specified_fan_speed);
static DEVICE_ATTR(cpu_fan_speed, S_IRUGO,
show_cpu_fan_speed, NULL);
static DEVICE_ATTR(gpu_fan_speed, S_IRUGO,
show_gpu_fan_speed, NULL);
static DEVICE_ATTR(sensor1_fan_speed, S_IRUGO,
show_sensor1_fan_speed, NULL);
static DEVICE_ATTR(sensor2_fan_speed, S_IRUGO,
show_sensor2_fan_speed, NULL);
static DEVICE_ATTR(limit_adjust, S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH,
show_limit_adjust, store_limit_adjust);
@ -534,6 +547,7 @@ thermostat_init(void)
{
struct device_node* np;
u32 *prop;
int i = 0, offset = 0;
np = of_find_node_by_name(NULL, "fan");
if (!np)
@ -545,6 +559,12 @@ thermostat_init(void)
else
return -ENODEV;
prop = (u32 *)get_property(np, "hwsensor-params-version", NULL);
printk(KERN_INFO "adt746x: version %d (%ssupported)\n", *prop,
(*prop == 1)?"":"un");
if (*prop != 1)
return -ENODEV;
prop = (u32 *)get_property(np, "reg", NULL);
if (!prop)
return -ENODEV;
@ -563,6 +583,23 @@ thermostat_init(void)
"limit_adjust: %d, fan_speed: %d\n",
therm_bus, therm_address, limit_adjust, fan_speed);
if (get_property(np, "hwsensor-location", NULL)) {
for (i = 0; i < 3; i++) {
sensor_location[i] = get_property(np,
"hwsensor-location", NULL) + offset;
if (sensor_location[i] == NULL)
sensor_location[i] = "";
printk(KERN_INFO "sensor %d: %s\n", i, sensor_location[i]);
offset += strlen(sensor_location[i]) + 1;
}
} else {
sensor_location[0] = "?";
sensor_location[1] = "?";
sensor_location[2] = "?";
}
of_dev = of_platform_device_create(np, "temperatures");
if (of_dev == NULL) {
@ -570,15 +607,17 @@ thermostat_init(void)
return -ENODEV;
}
device_create_file(&of_dev->dev, &dev_attr_cpu_temperature);
device_create_file(&of_dev->dev, &dev_attr_gpu_temperature);
device_create_file(&of_dev->dev, &dev_attr_cpu_limit);
device_create_file(&of_dev->dev, &dev_attr_gpu_limit);
device_create_file(&of_dev->dev, &dev_attr_sensor1_temperature);
device_create_file(&of_dev->dev, &dev_attr_sensor2_temperature);
device_create_file(&of_dev->dev, &dev_attr_sensor1_limit);
device_create_file(&of_dev->dev, &dev_attr_sensor2_limit);
device_create_file(&of_dev->dev, &dev_attr_sensor1_location);
device_create_file(&of_dev->dev, &dev_attr_sensor2_location);
device_create_file(&of_dev->dev, &dev_attr_limit_adjust);
device_create_file(&of_dev->dev, &dev_attr_specified_fan_speed);
device_create_file(&of_dev->dev, &dev_attr_cpu_fan_speed);
device_create_file(&of_dev->dev, &dev_attr_sensor1_fan_speed);
if(therm_type == ADT7460)
device_create_file(&of_dev->dev, &dev_attr_gpu_fan_speed);
device_create_file(&of_dev->dev, &dev_attr_sensor2_fan_speed);
#ifndef CONFIG_I2C_KEYWEST
request_module("i2c-keywest");
@ -591,17 +630,19 @@ static void __exit
thermostat_exit(void)
{
if (of_dev) {
device_remove_file(&of_dev->dev, &dev_attr_cpu_temperature);
device_remove_file(&of_dev->dev, &dev_attr_gpu_temperature);
device_remove_file(&of_dev->dev, &dev_attr_cpu_limit);
device_remove_file(&of_dev->dev, &dev_attr_gpu_limit);
device_remove_file(&of_dev->dev, &dev_attr_sensor1_temperature);
device_remove_file(&of_dev->dev, &dev_attr_sensor2_temperature);
device_remove_file(&of_dev->dev, &dev_attr_sensor1_limit);
device_remove_file(&of_dev->dev, &dev_attr_sensor2_limit);
device_remove_file(&of_dev->dev, &dev_attr_sensor1_location);
device_remove_file(&of_dev->dev, &dev_attr_sensor2_location);
device_remove_file(&of_dev->dev, &dev_attr_limit_adjust);
device_remove_file(&of_dev->dev, &dev_attr_specified_fan_speed);
device_remove_file(&of_dev->dev, &dev_attr_cpu_fan_speed);
device_remove_file(&of_dev->dev, &dev_attr_sensor1_fan_speed);
if(therm_type == ADT7460)
device_remove_file(&of_dev->dev,
&dev_attr_gpu_fan_speed);
&dev_attr_sensor2_fan_speed);
of_device_unregister(of_dev);
}

View file

@ -363,6 +363,9 @@ int bttv_I2CWrite(struct bttv *btv, unsigned char addr, unsigned char b1,
/* read EEPROM content */
void __devinit bttv_readee(struct bttv *btv, unsigned char *eedata, int addr)
{
memset(eedata, 0, 256);
if (0 != btv->i2c_rc)
return;
btv->i2c_client.addr = addr >> 1;
tveeprom_read(&btv->i2c_client, eedata, 256);
}

View file

@ -1555,6 +1555,7 @@ config SIS900
tristate "SiS 900/7016 PCI Fast Ethernet Adapter support"
depends on NET_PCI && PCI
select CRC32
select MII
---help---
This is a driver for the Fast Ethernet PCI network cards based on
the SiS 900 and SiS 7016 chips. The SiS 900 core is also embedded in
@ -2031,6 +2032,15 @@ config TIGON3
To compile this driver as a module, choose M here: the module
will be called tg3. This is recommended.
config BNX2
tristate "Broadcom NetXtremeII support"
depends on PCI
help
This driver supports Broadcom NetXtremeII gigabit Ethernet cards.
To compile this driver as a module, choose M here: the module
will be called bnx2. This is recommended.
config GIANFAR
tristate "Gianfar Ethernet"
depends on 85xx || 83xx

View file

@ -51,6 +51,7 @@ obj-$(CONFIG_NS83820) += ns83820.o
obj-$(CONFIG_STNIC) += stnic.o 8390.o
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_TIGON3) += tg3.o
obj-$(CONFIG_BNX2) += bnx2.o
obj-$(CONFIG_TC35815) += tc35815.o
obj-$(CONFIG_SK98LIN) += sk98lin/
obj-$(CONFIG_SKFP) += skfp/

View file

@ -738,6 +738,7 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget)
short vtag;
#endif
int rx_pkt_limit = dev->quota;
unsigned long flags;
do{
/* process receive packets until we use the quota*/
@ -841,18 +842,19 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget)
/* Receive descriptor is empty now */
dev->quota -= num_rx_pkt;
*budget -= num_rx_pkt;
spin_lock_irqsave(&lp->lock, flags);
netif_rx_complete(dev);
/* enable receive interrupt */
writel(VAL0|RINTEN0, mmio + INTEN0);
writel(VAL2 | RDMD0, mmio + CMD0);
spin_unlock_irqrestore(&lp->lock, flags);
return 0;
rx_not_empty:
/* Do not call a netif_rx_complete */
dev->quota -= num_rx_pkt;
*budget -= num_rx_pkt;
return 1;
}
#else
@ -1261,18 +1263,20 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *reg
struct net_device * dev = (struct net_device *) dev_id;
struct amd8111e_priv *lp = netdev_priv(dev);
void __iomem *mmio = lp->mmio;
unsigned int intr0;
unsigned int intr0, intren0;
unsigned int handled = 1;
if(dev == NULL)
if(unlikely(dev == NULL))
return IRQ_NONE;
if (regs) spin_lock (&lp->lock);
spin_lock(&lp->lock);
/* disabling interrupt */
writel(INTREN, mmio + CMD0);
/* Read interrupt status */
intr0 = readl(mmio + INT0);
intren0 = readl(mmio + INTEN0);
/* Process all the INT event until INTR bit is clear. */
@ -1293,11 +1297,11 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *reg
/* Schedule a polling routine */
__netif_rx_schedule(dev);
}
else {
else if (intren0 & RINTEN0) {
printk("************Driver bug! \
interrupt while in poll\n");
/* Fix by disabling interrupts */
writel(RINT0, mmio + INT0);
/* Fix by disable receive interrupts */
writel(RINTEN0, mmio + INTEN0);
}
}
#else
@ -1321,7 +1325,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *reg
err_no_interrupt:
writel( VAL0 | INTREN,mmio + CMD0);
if (regs) spin_unlock(&lp->lock);
spin_unlock(&lp->lock);
return IRQ_RETVAL(handled);
}

drivers/net/bnx2.c: new file, 5530 lines (diff suppressed because it is too large)

drivers/net/bnx2.h: new file, 4352 lines (diff suppressed because it is too large)

drivers/net/bnx2_fw.h: new file, 2468 lines (diff suppressed because it is too large)

View file

@ -3037,7 +3037,7 @@ static void bond_activebackup_arp_mon(struct net_device *bond_dev)
bond_set_slave_inactive_flags(bond->current_arp_slave);
/* search for next candidate */
bond_for_each_slave_from(bond, slave, i, bond->current_arp_slave) {
bond_for_each_slave_from(bond, slave, i, bond->current_arp_slave->next) {
if (IS_UP(slave->dev)) {
slave->link = BOND_LINK_BACK;
bond_set_slave_active_flags(slave);

View file

@ -155,9 +155,9 @@
#define DRV_NAME "e100"
#define DRV_EXT "-NAPI"
#define DRV_VERSION "3.3.6-k2"DRV_EXT
#define DRV_VERSION "3.4.8-k2"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2004 Intel Corporation"
#define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation"
#define PFX DRV_NAME ": "
#define E100_WATCHDOG_PERIOD (2 * HZ)
@ -210,11 +210,17 @@ static struct pci_device_id e100_id_table[] = {
INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);
@ -269,6 +275,12 @@ enum scb_status {
rus_mask = 0x3C,
};
enum ru_state {
RU_SUSPENDED = 0,
RU_RUNNING = 1,
RU_UNINITIALIZED = -1,
};
enum scb_stat_ack {
stat_ack_not_ours = 0x00,
stat_ack_sw_gen = 0x04,
@ -510,7 +522,7 @@ struct nic {
struct rx *rx_to_use;
struct rx *rx_to_clean;
struct rfd blank_rfd;
int ru_running;
enum ru_state ru_running;
spinlock_t cb_lock ____cacheline_aligned;
spinlock_t cmd_lock;
@ -539,6 +551,7 @@ struct nic {
struct timer_list watchdog;
struct timer_list blink_timer;
struct mii_if_info mii;
struct work_struct tx_timeout_task;
enum loopback loopback;
struct mem *mem;
@ -770,7 +783,7 @@ static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
return 0;
}
#define E100_WAIT_SCB_TIMEOUT 40
#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
unsigned long flags;
@ -840,6 +853,10 @@ static inline int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
* because the controller is too busy, so
* let's just queue the command and try again
* when another command is scheduled. */
if(err == -ENOSPC) {
//request a reset
schedule_work(&nic->tx_timeout_task);
}
break;
} else {
nic->cuc_cmd = cuc_resume;
@ -884,7 +901,7 @@ static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
static void e100_get_defaults(struct nic *nic)
{
struct param_range rfds = { .min = 64, .max = 256, .count = 64 };
struct param_range rfds = { .min = 16, .max = 256, .count = 64 };
struct param_range cbs = { .min = 64, .max = 256, .count = 64 };
pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
@ -899,8 +916,9 @@ static void e100_get_defaults(struct nic *nic)
/* Quadwords to DMA into FIFO before starting frame transmit */
nic->tx_threshold = 0xE0;
nic->tx_command = cpu_to_le16(cb_tx | cb_i | cb_tx_sf |
((nic->mac >= mac_82558_D101_A4) ? cb_cid : 0));
/* no interrupt for every tx completion, delay = 256us if not 557*/
nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
/* Template for a freshly allocated RFD */
nic->blank_rfd.command = cpu_to_le16(cb_el);
@ -964,7 +982,8 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
if(nic->flags & multicast_all)
config->multicast_all = 0x1; /* 1=accept, 0=no */
if(!(nic->flags & wol_magic))
/* disable WoL when up */
if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
config->magic_packet_disable = 0x1; /* 1=off, 0=on */
if(nic->mac >= mac_82558_D101_A4) {
@ -1203,7 +1222,9 @@ static void e100_update_stats(struct nic *nic)
}
}
e100_exec_cmd(nic, cuc_dump_reset, 0);
if(e100_exec_cmd(nic, cuc_dump_reset, 0))
DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
@ -1279,12 +1300,15 @@ static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
cb->command = nic->tx_command;
/* interrupt every 16 packets regardless of delay */
if((nic->cbs_avail & ~15) == nic->cbs_avail) cb->command |= cb_i;
cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
cb->u.tcb.tcb_byte_count = 0;
cb->u.tcb.threshold = nic->tx_threshold;
cb->u.tcb.tbd_count = 1;
cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
skb->data, skb->len, PCI_DMA_TODEVICE));
// check for mapping failure?
cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
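The new per-16-packets interrupt check above relies on a mask trick: x & ~15 clears the low four bits, so it equals x exactly when x is a multiple of 16. A tiny worked example, illustrative only:

static int every_16th(unsigned int cbs_avail)
{
	/* 48 & ~15 == 48 -> true; 49 & ~15 == 48 != 49 -> false */
	return (cbs_avail & ~15) == cbs_avail;
}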
@ -1297,7 +1321,8 @@ static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
Issue a NOP command followed by a 1us delay before
issuing the Tx command. */
e100_exec_cmd(nic, cuc_nop, 0);
if(e100_exec_cmd(nic, cuc_nop, 0))
DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
udelay(1);
}
@ -1415,12 +1440,18 @@ static int e100_alloc_cbs(struct nic *nic)
return 0;
}
static inline void e100_start_receiver(struct nic *nic)
static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
{
if(!nic->rxs) return;
if(RU_SUSPENDED != nic->ru_running) return;
/* handle init time starts */
if(!rx) rx = nic->rxs;
/* (Re)start RU if suspended or idle and RFA is non-NULL */
if(!nic->ru_running && nic->rx_to_clean->skb) {
e100_exec_cmd(nic, ruc_start, nic->rx_to_clean->dma_addr);
nic->ru_running = 1;
if(rx->skb) {
e100_exec_cmd(nic, ruc_start, rx->dma_addr);
nic->ru_running = RU_RUNNING;
}
}
@ -1437,6 +1468,13 @@ static inline int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
if(pci_dma_mapping_error(rx->dma_addr)) {
dev_kfree_skb_any(rx->skb);
rx->skb = 0;
rx->dma_addr = 0;
return -ENOMEM;
}
/* Link the RFD to end of RFA by linking previous RFD to
* this one, and clearing EL bit of previous. */
if(rx->prev->skb) {
@ -1471,7 +1509,7 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
/* If data isn't ready, nothing to indicate */
if(unlikely(!(rfd_status & cb_complete)))
return -EAGAIN;
return -ENODATA;
/* Get actual data size */
actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
@ -1482,6 +1520,10 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
pci_unmap_single(nic->pdev, rx->dma_addr,
RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
/* this allows for a fast restart without re-enabling interrupts */
if(le16_to_cpu(rfd->command) & cb_el)
nic->ru_running = RU_SUSPENDED;
/* Pull off the RFD and put the actual data (minus eth hdr) */
skb_reserve(skb, sizeof(struct rfd));
skb_put(skb, actual_size);
@ -1514,20 +1556,45 @@ static inline void e100_rx_clean(struct nic *nic, unsigned int *work_done,
unsigned int work_to_do)
{
struct rx *rx;
int restart_required = 0;
struct rx *rx_to_start = NULL;
/* are we already rnr? then pay attention!!! this ensures that
* the state machine progression never allows a start with a
* partially cleaned list, avoiding a race between hardware
* and rx_to_clean when in NAPI mode */
if(RU_SUSPENDED == nic->ru_running)
restart_required = 1;
/* Indicate newly arrived packets */
for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
if(e100_rx_indicate(nic, rx, work_done, work_to_do))
int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
if(-EAGAIN == err) {
/* hit quota so have more work to do, restart once
* cleanup is complete */
restart_required = 0;
break;
} else if(-ENODATA == err)
break; /* No more to clean */
}
/* save our starting point as the place we'll restart the receiver */
if(restart_required)
rx_to_start = nic->rx_to_clean;
/* Alloc new skbs to refill list */
for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
if(unlikely(e100_rx_alloc_skb(nic, rx)))
break; /* Better luck next time (see watchdog) */
}
e100_start_receiver(nic);
if(restart_required) {
// ack the rnr?
writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
e100_start_receiver(nic, rx_to_start);
if(work_done)
(*work_done)++;
}
}
static void e100_rx_clean_list(struct nic *nic)
@ -1535,6 +1602,8 @@ static void e100_rx_clean_list(struct nic *nic)
struct rx *rx;
unsigned int i, count = nic->params.rfds.count;
nic->ru_running = RU_UNINITIALIZED;
if(nic->rxs) {
for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
if(rx->skb) {
@ -1548,7 +1617,6 @@ static void e100_rx_clean_list(struct nic *nic)
}
nic->rx_to_use = nic->rx_to_clean = NULL;
nic->ru_running = 0;
}
static int e100_rx_alloc_list(struct nic *nic)
@ -1557,6 +1625,7 @@ static int e100_rx_alloc_list(struct nic *nic)
unsigned int i, count = nic->params.rfds.count;
nic->rx_to_use = nic->rx_to_clean = NULL;
nic->ru_running = RU_UNINITIALIZED;
if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC)))
return -ENOMEM;
@ -1572,6 +1641,7 @@ static int e100_rx_alloc_list(struct nic *nic)
}
nic->rx_to_use = nic->rx_to_clean = nic->rxs;
nic->ru_running = RU_SUSPENDED;
return 0;
}
@ -1593,7 +1663,7 @@ static irqreturn_t e100_intr(int irq, void *dev_id, struct pt_regs *regs)
/* We hit Receive No Resource (RNR); restart RU after cleaning */
if(stat_ack & stat_ack_rnr)
nic->ru_running = 0;
nic->ru_running = RU_SUSPENDED;
e100_disable_irq(nic);
netif_rx_schedule(netdev);
@ -1663,6 +1733,7 @@ static int e100_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
#ifdef CONFIG_PM
static int e100_asf(struct nic *nic)
{
/* ASF can be enabled from eeprom */
@ -1671,6 +1742,7 @@ static int e100_asf(struct nic *nic)
!(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
}
#endif
static int e100_up(struct nic *nic)
{
@ -1683,13 +1755,16 @@ static int e100_up(struct nic *nic)
if((err = e100_hw_init(nic)))
goto err_clean_cbs;
e100_set_multicast_list(nic->netdev);
e100_start_receiver(nic);
e100_start_receiver(nic, 0);
mod_timer(&nic->watchdog, jiffies);
if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ,
nic->netdev->name, nic->netdev)))
goto err_no_irq;
e100_enable_irq(nic);
netif_wake_queue(nic->netdev);
netif_poll_enable(nic->netdev);
/* enable ints _after_ enabling poll, preventing a race between
* disable ints+schedule */
e100_enable_irq(nic);
return 0;
err_no_irq:
@ -1703,11 +1778,13 @@ err_rx_clean_list:
static void e100_down(struct nic *nic)
{
/* wait here for poll to complete */
netif_poll_disable(nic->netdev);
netif_stop_queue(nic->netdev);
e100_hw_reset(nic);
free_irq(nic->pdev->irq, nic->netdev);
del_timer_sync(&nic->watchdog);
netif_carrier_off(nic->netdev);
netif_stop_queue(nic->netdev);
e100_clean_cbs(nic);
e100_rx_clean_list(nic);
}
@ -1716,6 +1793,15 @@ static void e100_tx_timeout(struct net_device *netdev)
{
struct nic *nic = netdev_priv(netdev);
/* Reset outside of interrupt context, to avoid request_irq
* in interrupt context */
schedule_work(&nic->tx_timeout_task);
}
static void e100_tx_timeout_task(struct net_device *netdev)
{
struct nic *nic = netdev_priv(netdev);
DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
readb(&nic->csr->scb.status));
e100_down(netdev_priv(netdev));
@ -1749,7 +1835,7 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
BMCR_LOOPBACK);
e100_start_receiver(nic);
e100_start_receiver(nic, 0);
if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) {
err = -ENOMEM;
@ -1869,7 +1955,6 @@ static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
else
nic->flags &= ~wol_magic;
pci_enable_wake(nic->pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
e100_exec_cb(nic, NULL, e100_configure);
return 0;
@ -2223,6 +2308,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
e100_get_defaults(nic);
/* locks must be initialized before calling hw_reset */
spin_lock_init(&nic->cb_lock);
spin_lock_init(&nic->cmd_lock);
@ -2240,6 +2326,9 @@ static int __devinit e100_probe(struct pci_dev *pdev,
nic->blink_timer.function = e100_blink_led;
nic->blink_timer.data = (unsigned long)nic;
INIT_WORK(&nic->tx_timeout_task,
(void (*)(void *))e100_tx_timeout_task, netdev);
if((err = e100_alloc(nic))) {
DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
goto err_out_iounmap;
@ -2263,7 +2352,8 @@ static int __devinit e100_probe(struct pci_dev *pdev,
(nic->eeprom[eeprom_id] & eeprom_id_wol))
nic->flags |= wol_magic;
pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
/* ack any pending wake events, disable PME */
pci_enable_wake(pdev, 0, 0);
strcpy(netdev->name, "eth%d");
if((err = register_netdev(netdev))) {
@ -2335,7 +2425,10 @@ static int e100_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
e100_hw_init(nic);
/* ack any pending wake events, disable PME */
pci_enable_wake(pdev, 0, 0);
if(e100_hw_init(nic))
DPRINTK(HW, ERR, "e100_hw_init failed\n");
netif_device_attach(netdev);
if(netif_running(netdev))
@ -2345,6 +2438,21 @@ static int e100_resume(struct pci_dev *pdev)
}
#endif
static void e100_shutdown(struct device *dev)
{
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct nic *nic = netdev_priv(netdev);
#ifdef CONFIG_PM
pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
#else
pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
#endif
}
static struct pci_driver e100_driver = {
.name = DRV_NAME,
.id_table = e100_id_table,
@ -2354,6 +2462,11 @@ static struct pci_driver e100_driver = {
.suspend = e100_suspend,
.resume = e100_resume,
#endif
.driver = {
.shutdown = e100_shutdown,
}
};
static int __init e100_init_module(void)

View file

@ -1,7 +1,7 @@
/*******************************************************************************
Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@ -112,6 +112,8 @@ struct e1000_adapter;
#define E1000_MAX_82544_RXD 4096
/* Supported Rx Buffer Sizes */
#define E1000_RXBUFFER_128 128 /* Used for packet split */
#define E1000_RXBUFFER_256 256 /* Used for packet split */
#define E1000_RXBUFFER_2048 2048
#define E1000_RXBUFFER_4096 4096
#define E1000_RXBUFFER_8192 8192
@ -137,15 +139,19 @@ struct e1000_adapter;
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define AUTO_ALL_MODES 0
#define E1000_EEPROM_82544_APM 0x0004
#define E1000_EEPROM_APME 0x0400
#define AUTO_ALL_MODES 0
#define E1000_EEPROM_82544_APM 0x0400
#define E1000_EEPROM_APME 0x0400
#ifndef E1000_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define E1000_MASTER_SLAVE e1000_ms_hw_default
#endif
#define E1000_MNG_VLAN_NONE -1
/* Number of packet split data buffers (not including the header buffer) */
#define PS_PAGE_BUFFERS MAX_PS_BUFFERS-1
/* only works for sizes that are powers of 2 */
#define E1000_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
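Aside, not part of the patch: E1000_ROUNDUP relies on the usual power-of-two trick, adding size - 1 and masking with ~(size - 1), which is why the comment restricts it to power-of-two sizes. A standalone sketch with illustrative names:

#include <stdio.h>

#define ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))

int main(void)
{
        unsigned int len = 300;

        ROUNDUP(len, 128);              /* rounds 300 up to 384 */
        printf("%u\n", len);
        return 0;
}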
@ -159,6 +165,9 @@ struct e1000_buffer {
uint16_t next_to_watch;
};
struct e1000_ps_page { struct page *ps_page[MAX_PS_BUFFERS]; };
struct e1000_ps_page_dma { uint64_t ps_page_dma[MAX_PS_BUFFERS]; };
struct e1000_desc_ring {
/* pointer to the descriptor ring memory */
void *desc;
@ -174,12 +183,19 @@ struct e1000_desc_ring {
unsigned int next_to_clean;
/* array of buffer information structs */
struct e1000_buffer *buffer_info;
/* arrays of page information for packet split */
struct e1000_ps_page *ps_page;
struct e1000_ps_page_dma *ps_page_dma;
};
#define E1000_DESC_UNUSED(R) \
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
#define E1000_RX_DESC_EXT(R, i) \
(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
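Aside, not part of the patch: E1000_DESC_UNUSED counts the free slots between the clean and use indices of a descriptor ring, leaving one slot permanently empty so the producer index never catches the consumer index. A standalone sketch of the same arithmetic, with illustrative names:

#include <stdio.h>

struct ring { unsigned int count, next_to_use, next_to_clean; };

static unsigned int desc_unused(const struct ring *r)
{
        return ((r->next_to_clean > r->next_to_use) ? 0 : r->count) +
                r->next_to_clean - r->next_to_use - 1;
}

int main(void)
{
        struct ring r = { .count = 256, .next_to_use = 10, .next_to_clean = 5 };

        printf("%u unused\n", desc_unused(&r));         /* 250 */
        return 0;
}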
@ -192,6 +208,7 @@ struct e1000_adapter {
struct timer_list watchdog_timer;
struct timer_list phy_info_timer;
struct vlan_group *vlgrp;
uint16_t mng_vlan_id;
uint32_t bd_number;
uint32_t rx_buffer_len;
uint32_t part_num;
@ -228,14 +245,23 @@ struct e1000_adapter {
boolean_t detect_tx_hung;
/* RX */
#ifdef CONFIG_E1000_NAPI
boolean_t (*clean_rx) (struct e1000_adapter *adapter, int *work_done,
int work_to_do);
#else
boolean_t (*clean_rx) (struct e1000_adapter *adapter);
#endif
void (*alloc_rx_buf) (struct e1000_adapter *adapter);
struct e1000_desc_ring rx_ring;
uint64_t hw_csum_err;
uint64_t hw_csum_good;
uint32_t rx_int_delay;
uint32_t rx_abs_int_delay;
boolean_t rx_csum;
boolean_t rx_ps;
uint32_t gorcl;
uint64_t gorcl_old;
uint16_t rx_ps_bsize0;
/* Interrupt Throttle Rate */
uint32_t itr;
@ -257,5 +283,8 @@ struct e1000_adapter {
int msg_enable;
#ifdef CONFIG_PCI_MSI
boolean_t have_msi;
#endif
};
#endif /* _E1000_H_ */

View file

@ -1,7 +1,7 @@
/*******************************************************************************
Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@ -69,6 +69,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
{ "rx_crc_errors", E1000_STAT(net_stats.rx_crc_errors) },
{ "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
{ "rx_fifo_errors", E1000_STAT(net_stats.rx_fifo_errors) },
{ "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
{ "rx_missed_errors", E1000_STAT(net_stats.rx_missed_errors) },
{ "tx_aborted_errors", E1000_STAT(net_stats.tx_aborted_errors) },
{ "tx_carrier_errors", E1000_STAT(net_stats.tx_carrier_errors) },
@ -593,7 +594,7 @@ e1000_set_ringparam(struct net_device *netdev,
tx_old = adapter->tx_ring;
rx_old = adapter->rx_ring;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
if(netif_running(adapter->netdev))
@ -784,8 +785,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
/* Hook up test interrupt handler just for this test */
if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
shared_int = FALSE;
} else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ,
netdev->name, netdev)){
} else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ,
netdev->name, netdev)){
*data = 1;
return -1;
}
@ -842,10 +843,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
* test failed.
*/
adapter->test_icr = 0;
E1000_WRITE_REG(&adapter->hw, IMC,
(~mask & 0x00007FFF));
E1000_WRITE_REG(&adapter->hw, ICS,
(~mask & 0x00007FFF));
E1000_WRITE_REG(&adapter->hw, IMC, ~mask & 0x00007FFF);
E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
msec_delay(10);
if(adapter->test_icr) {
@ -919,7 +918,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
/* Setup Tx descriptor ring and Tx buffers */
txdr->count = 80;
if(!txdr->count)
txdr->count = E1000_DEFAULT_TXD;
size = txdr->count * sizeof(struct e1000_buffer);
if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
@ -974,7 +974,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
/* Setup Rx descriptor ring and Rx buffers */
rxdr->count = 80;
if(!rxdr->count)
rxdr->count = E1000_DEFAULT_RXD;
size = rxdr->count * sizeof(struct e1000_buffer);
if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
@ -1008,7 +1009,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
struct sk_buff *skb;
if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
GFP_KERNEL))) {
ret_val = 6;
goto err_nomem;
@ -1310,31 +1311,62 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
struct pci_dev *pdev = adapter->pdev;
int i, ret_val;
int i, j, k, l, lc, good_cnt, ret_val=0;
unsigned long time;
E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);
for(i = 0; i < 64; i++) {
e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1024);
pci_dma_sync_single_for_device(pdev, txdr->buffer_info[i].dma,
txdr->buffer_info[i].length,
PCI_DMA_TODEVICE);
}
E1000_WRITE_REG(&adapter->hw, TDT, i);
/* Calculate the loop count based on the largest descriptor ring
* The idea is to wrap the largest ring a number of times using 64
* send/receive pairs during each loop
*/
msec_delay(200);
i = 0;
do {
pci_dma_sync_single_for_cpu(pdev, rxdr->buffer_info[i].dma,
rxdr->buffer_info[i].length,
PCI_DMA_FROMDEVICE);
ret_val = e1000_check_lbtest_frame(rxdr->buffer_info[i].skb,
1024);
i++;
} while (ret_val != 0 && i < 64);
if(rxdr->count <= txdr->count)
lc = ((txdr->count / 64) * 2) + 1;
else
lc = ((rxdr->count / 64) * 2) + 1;
k = l = 0;
for(j = 0; j <= lc; j++) { /* loop count loop */
for(i = 0; i < 64; i++) { /* send the packets */
e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
1024);
pci_dma_sync_single_for_device(pdev,
txdr->buffer_info[k].dma,
txdr->buffer_info[k].length,
PCI_DMA_TODEVICE);
if(unlikely(++k == txdr->count)) k = 0;
}
E1000_WRITE_REG(&adapter->hw, TDT, k);
msec_delay(200);
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
do { /* receive the sent packets */
pci_dma_sync_single_for_cpu(pdev,
rxdr->buffer_info[l].dma,
rxdr->buffer_info[l].length,
PCI_DMA_FROMDEVICE);
ret_val = e1000_check_lbtest_frame(
rxdr->buffer_info[l].skb,
1024);
if(!ret_val)
good_cnt++;
if(unlikely(++l == rxdr->count)) l = 0;
/* time + 20 msecs (200 msecs on 2.4) is more than
* enough time to complete the receives; if it's
* exceeded, break out and report an error
*/
} while (good_cnt < 64 && jiffies < (time + 20));
if(good_cnt != 64) {
ret_val = 13; /* ret_val is the same as mis-compare */
break;
}
if(jiffies >= (time + 2)) {
ret_val = 14; /* error code for time out error */
break;
}
} /* end loop count loop */
return ret_val;
}
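Aside, not part of the patch: the loop count above is sized so that, at 64 send/receive pairs per pass, the larger of the two test rings is wrapped roughly twice. A standalone sketch of that arithmetic, with illustrative names (the self-test rings are set up with 80 descriptors each earlier in this file):

#include <stdio.h>

static int loop_count(int txd_count, int rxd_count)
{
        int largest = (rxd_count <= txd_count) ? txd_count : rxd_count;

        return ((largest / 64) * 2) + 1;
}

int main(void)
{
        printf("%d passes\n", loop_count(80, 80));      /* 3 */
        return 0;
}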
@ -1354,13 +1386,12 @@ static int
e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
{
*data = 0;
if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
int i = 0;
adapter->hw.serdes_link_down = TRUE;
/* on some blade server designs link establishment */
/* could take as long as 2-3 minutes. */
/* On some blade server designs, link establishment
* could take as long as 2-3 minutes */
do {
e1000_check_for_link(&adapter->hw);
if (adapter->hw.serdes_link_down == FALSE)
@ -1368,9 +1399,11 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
msec_delay(20);
} while (i++ < 3750);
*data = 1;
*data = 1;
} else {
e1000_check_for_link(&adapter->hw);
if(adapter->hw.autoneg) /* if auto_neg is set wait for it */
msec_delay(4000);
if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
*data = 1;

File diff suppressed because it is too large.

View file

@ -1,7 +1,7 @@
/*******************************************************************************
Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@ -57,6 +57,7 @@ typedef enum {
e1000_82541_rev_2,
e1000_82547,
e1000_82547_rev_2,
e1000_82573,
e1000_num_macs
} e1000_mac_type;
@ -64,6 +65,7 @@ typedef enum {
e1000_eeprom_uninitialized = 0,
e1000_eeprom_spi,
e1000_eeprom_microwire,
e1000_eeprom_flash,
e1000_num_eeprom_types
} e1000_eeprom_type;
@ -96,6 +98,7 @@ typedef enum {
e1000_bus_type_unknown = 0,
e1000_bus_type_pci,
e1000_bus_type_pcix,
e1000_bus_type_pci_express,
e1000_bus_type_reserved
} e1000_bus_type;
@ -107,6 +110,7 @@ typedef enum {
e1000_bus_speed_100,
e1000_bus_speed_120,
e1000_bus_speed_133,
e1000_bus_speed_2500,
e1000_bus_speed_reserved
} e1000_bus_speed;
@ -115,6 +119,8 @@ typedef enum {
e1000_bus_width_unknown = 0,
e1000_bus_width_32,
e1000_bus_width_64,
e1000_bus_width_pciex_1,
e1000_bus_width_pciex_4,
e1000_bus_width_reserved
} e1000_bus_width;
@ -196,6 +202,7 @@ typedef enum {
typedef enum {
e1000_phy_m88 = 0,
e1000_phy_igp,
e1000_phy_igp_2,
e1000_phy_undefined = 0xFF
} e1000_phy_type;
@ -242,8 +249,19 @@ struct e1000_eeprom_info {
uint16_t address_bits;
uint16_t delay_usec;
uint16_t page_size;
boolean_t use_eerd;
boolean_t use_eewr;
};
/* Flex ASF Information */
#define E1000_HOST_IF_MAX_SIZE 2048
typedef enum {
e1000_byte_align = 0,
e1000_word_align = 1,
e1000_dword_align = 2
} e1000_align_type;
/* Error Codes */
@ -254,11 +272,16 @@ struct e1000_eeprom_info {
#define E1000_ERR_PARAM 4
#define E1000_ERR_MAC_TYPE 5
#define E1000_ERR_PHY_TYPE 6
#define E1000_ERR_RESET 9
#define E1000_ERR_MASTER_REQUESTS_PENDING 10
#define E1000_ERR_HOST_INTERFACE_COMMAND 11
#define E1000_BLK_PHY_RESET 12
/* Function prototypes */
/* Initialization */
int32_t e1000_reset_hw(struct e1000_hw *hw);
int32_t e1000_init_hw(struct e1000_hw *hw);
int32_t e1000_id_led_init(struct e1000_hw * hw);
int32_t e1000_set_mac_type(struct e1000_hw *hw);
void e1000_set_media_type(struct e1000_hw *hw);
@ -275,7 +298,7 @@ int32_t e1000_force_mac_fc(struct e1000_hw *hw);
/* PHY */
int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy_data);
int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
void e1000_phy_hw_reset(struct e1000_hw *hw);
int32_t e1000_phy_hw_reset(struct e1000_hw *hw);
int32_t e1000_phy_reset(struct e1000_hw *hw);
int32_t e1000_detect_gig_phy(struct e1000_hw *hw);
int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
@ -287,13 +310,86 @@ int32_t e1000_check_downshift(struct e1000_hw *hw);
int32_t e1000_validate_mdi_setting(struct e1000_hw *hw);
/* EEPROM Functions */
void e1000_init_eeprom_params(struct e1000_hw *hw);
int32_t e1000_init_eeprom_params(struct e1000_hw *hw);
boolean_t e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
int32_t e1000_read_eeprom_eerd(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
int32_t e1000_write_eeprom_eewr(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
int32_t e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
/* MNG HOST IF functions */
uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */
#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */
#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */
#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */
#define E1000_MNG_IAMT_MODE 0x3
#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */
#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP VLAN support enabled */
#define E1000_VFTA_ENTRY_SHIFT 0x5
#define E1000_VFTA_ENTRY_MASK 0x7F
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
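Aside, not part of the patch: the E1000_VFTA_ENTRY_* values above presumably describe how a 12-bit VLAN ID is split into a 32-bit VLAN filter table entry index and a bit position within that entry. A standalone sketch with illustrative names:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t vid = 1234;
        uint32_t index = (vid >> 5) & 0x7F;     /* ENTRY_SHIFT / ENTRY_MASK */
        uint32_t bit = vid & 0x1F;              /* ENTRY_BIT_SHIFT_MASK */

        printf("VFTA entry %u, bit %u\n", index, bit);  /* entry 38, bit 18 */
        return 0;
}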
struct e1000_host_mng_command_header {
uint8_t command_id;
uint8_t checksum;
uint16_t reserved1;
uint16_t reserved2;
uint16_t command_length;
};
struct e1000_host_mng_command_info {
struct e1000_host_mng_command_header command_header; /* Command Header / Command Result Header is 4 bytes */
uint8_t command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can be 0..0x658 bytes long */
};
#ifdef __BIG_ENDIAN
struct e1000_host_mng_dhcp_cookie{
uint32_t signature;
uint16_t vlan_id;
uint8_t reserved0;
uint8_t status;
uint32_t reserved1;
uint8_t checksum;
uint8_t reserved3;
uint16_t reserved2;
};
#else
struct e1000_host_mng_dhcp_cookie{
uint32_t signature;
uint8_t status;
uint8_t reserved0;
uint16_t vlan_id;
uint32_t reserved1;
uint16_t reserved2;
uint8_t reserved3;
uint8_t checksum;
};
#endif
int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
uint16_t length);
boolean_t e1000_check_mng_mode(struct e1000_hw *hw);
boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
int32_t e1000_mng_enable_host_if(struct e1000_hw *hw);
int32_t e1000_mng_host_if_write(struct e1000_hw *hw, uint8_t *buffer,
uint16_t length, uint16_t offset, uint8_t *sum);
int32_t e1000_mng_write_cmd_header(struct e1000_hw* hw,
struct e1000_host_mng_command_header* hdr);
int32_t e1000_mng_write_commit(struct e1000_hw *hw);
int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw);
int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw);
int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num);
int32_t e1000_read_mac_addr(struct e1000_hw * hw);
int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
/* Filters (multicast, vlan, receive) */
void e1000_init_rx_addrs(struct e1000_hw *hw);
@ -313,7 +409,6 @@ int32_t e1000_led_off(struct e1000_hw *hw);
/* Adaptive IFS Functions */
/* Everything else */
uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
void e1000_clear_hw_cntrs(struct e1000_hw *hw);
void e1000_reset_adaptive(struct e1000_hw *hw);
void e1000_update_adaptive(struct e1000_hw *hw);
@ -330,6 +425,19 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, uint32_t value);
int32_t e1000_config_dsp_after_link_change(struct e1000_hw *hw, boolean_t link_up);
int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active);
int32_t e1000_set_d0_lplu_state(struct e1000_hw *hw, boolean_t active);
void e1000_set_pci_express_master_disable(struct e1000_hw *hw);
void e1000_enable_pciex_master(struct e1000_hw *hw);
int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
int32_t e1000_get_auto_rd_done(struct e1000_hw *hw);
int32_t e1000_get_phy_cfg_done(struct e1000_hw *hw);
int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
void e1000_release_software_semaphore(struct e1000_hw *hw);
int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
int32_t e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw);
int32_t e1000_commit_shadow_ram(struct e1000_hw *hw);
uint8_t e1000_arc_subsystem_valid(struct e1000_hw *hw);
#define E1000_READ_REG_IO(a, reg) \
e1000_read_reg_io((a), E1000_##reg)
@ -369,6 +477,10 @@ int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active);
#define E1000_DEV_ID_82546GB_SERDES 0x107B
#define E1000_DEV_ID_82546GB_PCIE 0x108A
#define E1000_DEV_ID_82547EI 0x1019
#define E1000_DEV_ID_82573E 0x108B
#define E1000_DEV_ID_82573E_IAMT 0x108C
#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
#define NODE_ADDRESS_SIZE 6
#define ETH_LENGTH_OF_ADDRESS 6
@ -381,6 +493,7 @@ int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active);
#define E1000_REVISION_0 0
#define E1000_REVISION_1 1
#define E1000_REVISION_2 2
#define E1000_REVISION_3 3
#define SPEED_10 10
#define SPEED_100 100
@ -437,6 +550,7 @@ int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active);
E1000_IMS_RXSEQ | \
E1000_IMS_LSC)
/* Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor. We
* reserve one of these spots for our directed address, allowing us room for
@ -457,14 +571,74 @@ struct e1000_rx_desc {
uint16_t special;
};
/* Receive Descriptor - Extended */
union e1000_rx_desc_extended {
struct {
uint64_t buffer_addr;
uint64_t reserved;
} read;
struct {
struct {
uint32_t mrq; /* Multiple Rx Queues */
union {
uint32_t rss; /* RSS Hash */
struct {
uint16_t ip_id; /* IP id */
uint16_t csum; /* Packet Checksum */
} csum_ip;
} hi_dword;
} lower;
struct {
uint32_t status_error; /* ext status/error */
uint16_t length;
uint16_t vlan; /* VLAN tag */
} upper;
} wb; /* writeback */
};
#define MAX_PS_BUFFERS 4
/* Receive Descriptor - Packet Split */
union e1000_rx_desc_packet_split {
struct {
/* one buffer for protocol header(s), three data buffers */
uint64_t buffer_addr[MAX_PS_BUFFERS];
} read;
struct {
struct {
uint32_t mrq; /* Multiple Rx Queues */
union {
uint32_t rss; /* RSS Hash */
struct {
uint16_t ip_id; /* IP id */
uint16_t csum; /* Packet Checksum */
} csum_ip;
} hi_dword;
} lower;
struct {
uint32_t status_error; /* ext status/error */
uint16_t length0; /* length of buffer 0 */
uint16_t vlan; /* VLAN tag */
} middle;
struct {
uint16_t header_status;
uint16_t length[3]; /* length of buffers 1-3 */
} upper;
uint64_t reserved;
} wb; /* writeback */
};
/* Receive Descriptor bit definitions */
#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
@ -474,9 +648,20 @@ struct e1000_rx_desc {
#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
#define E1000_RXD_SPC_PRI_SHIFT 0x000D /* Priority is in upper 3 of 16 */
#define E1000_RXD_SPC_PRI_SHIFT 13
#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */
#define E1000_RXD_SPC_CFI_SHIFT 0x000C /* CFI is bit 12 */
#define E1000_RXD_SPC_CFI_SHIFT 12
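Aside, not part of the patch: the E1000_RXD_SPC_* masks and shifts above carve the 16-bit "special" field of a receive descriptor into VLAN ID, CFI and priority. A standalone sketch with illustrative names:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t special = 0xA00A;                      /* example value */
        uint16_t vlan = special & 0x0FFF;               /* lower 12 bits */
        uint16_t cfi = (special & 0x1000) >> 12;        /* bit 12 */
        uint16_t pri = (special & 0xE000) >> 13;        /* upper 3 bits */

        printf("vlan=%u cfi=%u pri=%u\n", vlan, cfi, pri);      /* 10 0 5 */
        return 0;
}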
#define E1000_RXDEXT_STATERR_CE 0x01000000
#define E1000_RXDEXT_STATERR_SE 0x02000000
#define E1000_RXDEXT_STATERR_SEQ 0x04000000
#define E1000_RXDEXT_STATERR_CXE 0x10000000
#define E1000_RXDEXT_STATERR_TCPE 0x20000000
#define E1000_RXDEXT_STATERR_IPE 0x40000000
#define E1000_RXDEXT_STATERR_RXE 0x80000000
#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
/* mask to determine if packets should be dropped due to frame errors */
#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
@ -486,6 +671,15 @@ struct e1000_rx_desc {
E1000_RXD_ERR_CXE | \
E1000_RXD_ERR_RXE)
/* Same mask, but for extended and packet split descriptors */
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
E1000_RXDEXT_STATERR_CE | \
E1000_RXDEXT_STATERR_SE | \
E1000_RXDEXT_STATERR_SEQ | \
E1000_RXDEXT_STATERR_CXE | \
E1000_RXDEXT_STATERR_RXE)
/* Transmit Descriptor */
struct e1000_tx_desc {
uint64_t buffer_addr; /* Address of the descriptor's data buffer */
@ -667,6 +861,7 @@ struct e1000_ffvt_entry {
#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
#define E1000_RCTL 0x00100 /* RX Control - RW */
#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
@ -676,9 +871,23 @@ struct e1000_ffvt_entry {
#define E1000_TBT 0x00448 /* TX Burst Timer - RW */
#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
#define E1000_FLASH_UPDATES 1000
#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
#define E1000_FLASHT 0x01028 /* FLASH Timer Register */
#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
#define E1000_FLSWCTL 0x01030 /* FLASH control register */
#define E1000_FLSWDATA 0x01034 /* FLASH data register */
#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */
#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */
#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */
#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */
@ -688,6 +897,7 @@ struct e1000_ffvt_entry {
#define E1000_RXDCTL 0x02828 /* RX Descriptor Control - RW */
#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */
#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */
#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */
#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
@ -703,6 +913,14 @@ struct e1000_ffvt_entry {
#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */
#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */
#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */
#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */
#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
@ -761,7 +979,17 @@ struct e1000_ffvt_entry {
#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
#define E1000_IAC 0x4100 /* Interrupt Assertion Count */
#define E1000_ICRXPTC 0x4104 /* Interrupt Cause Rx Packet Timer Expire Count */
#define E1000_ICRXATC 0x4108 /* Interrupt Cause Rx Absolute Timer Expire Count */
#define E1000_ICTXPTC 0x410C /* Interrupt Cause Tx Packet Timer Expire Count */
#define E1000_ICTXATC 0x4110 /* Interrupt Cause Tx Absolute Timer Expire Count */
#define E1000_ICTXQEC 0x4118 /* Interrupt Cause Tx Queue Empty Count */
#define E1000_ICTXQMTC 0x411C /* Interrupt Cause Tx Queue Minimum Threshold Count */
#define E1000_ICRXDMTC 0x4120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
#define E1000_ICRXOC 0x4124 /* Interrupt Cause Receiver Overrun Count */
#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
#define E1000_RA 0x05400 /* Receive Address - RW Array */
#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
@ -779,6 +1007,16 @@ struct e1000_ffvt_entry {
#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */
#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */
#define E1000_GCR 0x05B00 /* PCI-Ex Control */
#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */
#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
#define E1000_SWSM 0x05B50 /* SW Semaphore */
#define E1000_FWSM 0x05B54 /* FW Semaphore */
#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
#define E1000_HICR 0x08F00 /* Host Interface Control */
/* Register Set (82542)
*
* Some of the 82542 registers are located at different offsets than they are
@ -829,6 +1067,18 @@ struct e1000_ffvt_entry {
#define E1000_82542_VFTA 0x00600
#define E1000_82542_LEDCTL E1000_LEDCTL
#define E1000_82542_PBA E1000_PBA
#define E1000_82542_PBS E1000_PBS
#define E1000_82542_EEMNGCTL E1000_EEMNGCTL
#define E1000_82542_EEARBC E1000_EEARBC
#define E1000_82542_FLASHT E1000_FLASHT
#define E1000_82542_EEWR E1000_EEWR
#define E1000_82542_FLSWCTL E1000_FLSWCTL
#define E1000_82542_FLSWDATA E1000_FLSWDATA
#define E1000_82542_FLSWCNT E1000_FLSWCNT
#define E1000_82542_FLOP E1000_FLOP
#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL
#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE
#define E1000_82542_ERT E1000_ERT
#define E1000_82542_RXDCTL E1000_RXDCTL
#define E1000_82542_RADV E1000_RADV
#define E1000_82542_RSRPD E1000_RSRPD
@ -913,6 +1163,38 @@ struct e1000_ffvt_entry {
#define E1000_82542_FFMT E1000_FFMT
#define E1000_82542_FFVT E1000_FFVT
#define E1000_82542_HOST_IF E1000_HOST_IF
#define E1000_82542_IAM E1000_IAM
#define E1000_82542_EEMNGCTL E1000_EEMNGCTL
#define E1000_82542_PSRCTL E1000_PSRCTL
#define E1000_82542_RAID E1000_RAID
#define E1000_82542_TARC0 E1000_TARC0
#define E1000_82542_TDBAL1 E1000_TDBAL1
#define E1000_82542_TDBAH1 E1000_TDBAH1
#define E1000_82542_TDLEN1 E1000_TDLEN1
#define E1000_82542_TDH1 E1000_TDH1
#define E1000_82542_TDT1 E1000_TDT1
#define E1000_82542_TXDCTL1 E1000_TXDCTL1
#define E1000_82542_TARC1 E1000_TARC1
#define E1000_82542_RFCTL E1000_RFCTL
#define E1000_82542_GCR E1000_GCR
#define E1000_82542_GSCL_1 E1000_GSCL_1
#define E1000_82542_GSCL_2 E1000_GSCL_2
#define E1000_82542_GSCL_3 E1000_GSCL_3
#define E1000_82542_GSCL_4 E1000_GSCL_4
#define E1000_82542_FACTPS E1000_FACTPS
#define E1000_82542_SWSM E1000_SWSM
#define E1000_82542_FWSM E1000_FWSM
#define E1000_82542_FFLT_DBG E1000_FFLT_DBG
#define E1000_82542_IAC E1000_IAC
#define E1000_82542_ICRXPTC E1000_ICRXPTC
#define E1000_82542_ICRXATC E1000_ICRXATC
#define E1000_82542_ICTXPTC E1000_ICTXPTC
#define E1000_82542_ICTXATC E1000_ICTXATC
#define E1000_82542_ICTXQEC E1000_ICTXQEC
#define E1000_82542_ICTXQMTC E1000_ICTXQMTC
#define E1000_82542_ICRXDMTC E1000_ICRXDMTC
#define E1000_82542_ICRXOC E1000_ICRXOC
#define E1000_82542_HICR E1000_HICR
/* Statistics counters collected by the MAC */
struct e1000_hw_stats {
@ -974,11 +1256,21 @@ struct e1000_hw_stats {
uint64_t bptc;
uint64_t tsctc;
uint64_t tsctfc;
uint64_t iac;
uint64_t icrxptc;
uint64_t icrxatc;
uint64_t ictxptc;
uint64_t ictxatc;
uint64_t ictxqec;
uint64_t ictxqmtc;
uint64_t icrxdmtc;
uint64_t icrxoc;
};
/* Structure containing variables used by the shared code (e1000_hw.c) */
struct e1000_hw {
uint8_t __iomem *hw_addr;
uint8_t *hw_addr;
uint8_t *flash_address;
e1000_mac_type mac_type;
e1000_phy_type phy_type;
uint32_t phy_init_script;
@ -993,6 +1285,7 @@ struct e1000_hw {
e1000_ms_type original_master_slave;
e1000_ffe_config ffe_config_state;
uint32_t asf_firmware_present;
uint32_t eeprom_semaphore_present;
unsigned long io_base;
uint32_t phy_id;
uint32_t phy_revision;
@ -1009,6 +1302,8 @@ struct e1000_hw {
uint32_t ledctl_default;
uint32_t ledctl_mode1;
uint32_t ledctl_mode2;
boolean_t tx_pkt_filtering;
struct e1000_host_mng_dhcp_cookie mng_cookie;
uint16_t phy_spd_default;
uint16_t autoneg_advertised;
uint16_t pci_cmd_word;
@ -1047,16 +1342,24 @@ struct e1000_hw {
boolean_t adaptive_ifs;
boolean_t ifs_params_forced;
boolean_t in_ifs_mode;
boolean_t mng_reg_access_disabled;
};
#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */
#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */
#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */
#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */
#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */
#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */
/* Register Bit Masks */
/* Device Control */
#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */
#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */
#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */
#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */
@ -1070,6 +1373,7 @@ struct e1000_hw {
#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */
#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
@ -1089,6 +1393,7 @@ struct e1000_hw {
#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
#define E1000_STATUS_FUNC_SHIFT 2
#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */
#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
@ -1098,6 +1403,8 @@ struct e1000_hw {
#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */
#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */
#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */
#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */
@ -1128,6 +1435,18 @@ struct e1000_hw {
#ifndef E1000_EEPROM_GRANT_ATTEMPTS
#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
#endif
#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */
#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */
#define E1000_EECD_SIZE_EX_SHIFT 11
#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */
#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */
#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */
#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */
#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
#define E1000_STM_OPCODE 0xDB00
#define E1000_HICR_FW_RESET 0xC0
/* EEPROM Read */
#define E1000_EERD_START 0x00000001 /* Start Read */
@ -1171,6 +1490,8 @@ struct e1000_hw {
#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000
#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000
#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000
#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
/* MDI Control */
#define E1000_MDIC_DATA_MASK 0x0000FFFF
@ -1187,14 +1508,17 @@ struct e1000_hw {
/* LED Control */
#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
#define E1000_LEDCTL_LED0_MODE_SHIFT 0
#define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020
#define E1000_LEDCTL_LED0_IVRT 0x00000040
#define E1000_LEDCTL_LED0_BLINK 0x00000080
#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00
#define E1000_LEDCTL_LED1_MODE_SHIFT 8
#define E1000_LEDCTL_LED1_BLINK_RATE 0x0002000
#define E1000_LEDCTL_LED1_IVRT 0x00004000
#define E1000_LEDCTL_LED1_BLINK 0x00008000
#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000
#define E1000_LEDCTL_LED2_MODE_SHIFT 16
#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000
#define E1000_LEDCTL_LED2_IVRT 0x00400000
#define E1000_LEDCTL_LED2_BLINK 0x00800000
#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000
@ -1238,6 +1562,10 @@ struct e1000_hw {
#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
#define E1000_ICR_TXD_LOW 0x00008000
#define E1000_ICR_SRPD 0x00010000
#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
#define E1000_ICR_MNG 0x00040000 /* Manageability event */
#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
/* Interrupt Cause Set */
#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@ -1255,6 +1583,9 @@ struct e1000_hw {
#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW
#define E1000_ICS_SRPD E1000_ICR_SRPD
#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */
#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */
#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */
/* Interrupt Mask Set */
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@ -1272,6 +1603,9 @@ struct e1000_hw {
#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW
#define E1000_IMS_SRPD E1000_ICR_SRPD
#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */
#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */
#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */
/* Interrupt Mask Clear */
#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@ -1289,6 +1623,9 @@ struct e1000_hw {
#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW
#define E1000_IMC_SRPD E1000_ICR_SRPD
#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */
#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */
#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */
/* Receive Control */
#define E1000_RCTL_RST 0x00000001 /* Software reset */
@ -1301,6 +1638,8 @@ struct e1000_hw {
#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */
#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */
#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */
#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */
@ -1327,6 +1666,34 @@ struct e1000_hw {
#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */
#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */
/* Use byte values for the following shift parameters
* Usage:
* psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
* E1000_PSRCTL_BSIZE0_MASK) |
* ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
* E1000_PSRCTL_BSIZE1_MASK) |
* ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
* E1000_PSRCTL_BSIZE2_MASK) |
* ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
* E1000_PSRCTL_BSIZE3_MASK))
* where value0 = [128..16256], default=256
* value1 = [1024..64512], default=4096
* value2 = [0..64512], default=4096
* value3 = [0..64512], default=0
*/
#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
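Aside, not part of the patch: putting the PSRCTL usage comment above into numbers, a standalone sketch that packs the documented defaults (256, 4096, 4096 and 0 bytes); the local ROUNDUP here mirrors E1000_ROUNDUP without the assignment:

#include <stdio.h>

#define ROUNDUP(i, size) (((i) + (size) - 1) & ~((size) - 1))

int main(void)
{
        unsigned int psrctl =
                ((ROUNDUP(256u,  128u)  >> 7)  & 0x0000007Fu) |         /* BSIZE0 */
                ((ROUNDUP(4096u, 1024u) >> 2)  & 0x00003F00u) |         /* BSIZE1 */
                ((ROUNDUP(4096u, 1024u) << 6)  & 0x003F0000u) |         /* BSIZE2 */
                ((ROUNDUP(0u,    1024u) << 14) & 0x3F000000u);          /* BSIZE3 */

        printf("PSRCTL = 0x%08X\n", psrctl);    /* 0x00040402 */
        return 0;
}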
/* Receive Descriptor */
#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */
@ -1341,6 +1708,23 @@ struct e1000_hw {
#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
/* Header split receive */
#define E1000_RFCTL_ISCSI_DIS 0x00000001
#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E
#define E1000_RFCTL_ISCSI_DWC_SHIFT 1
#define E1000_RFCTL_NFSW_DIS 0x00000040
#define E1000_RFCTL_NFSR_DIS 0x00000080
#define E1000_RFCTL_NFS_VER_MASK 0x00000300
#define E1000_RFCTL_NFS_VER_SHIFT 8
#define E1000_RFCTL_IPV6_DIS 0x00000400
#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800
#define E1000_RFCTL_ACK_DIS 0x00001000
#define E1000_RFCTL_ACKD_DIS 0x00002000
#define E1000_RFCTL_IPFRSP_DIS 0x00004000
#define E1000_RFCTL_EXTEN 0x00008000
#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
/* Receive Descriptor Control */
#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */
#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */
@ -1354,6 +1738,8 @@ struct e1000_hw {
#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
still to be processed. */
/* Transmit Configuration Word */
#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
@ -1387,12 +1773,16 @@ struct e1000_hw {
#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */
#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */
#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
/* Receive Checksum Control */
#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */
#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */
#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
/* Definitions for power management and wakeup registers */
/* Wake Up Control */
@ -1411,6 +1801,7 @@ struct e1000_hw {
#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
@ -1446,13 +1837,19 @@ struct e1000_hw {
#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery
* Filtering */
#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */
#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */
#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address
* filtering */
#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host
* memory */
#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address
* filtering */
#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */
#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */
#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */
#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */
#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */
@ -1463,11 +1860,97 @@ struct e1000_hw {
#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */
#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */
/* SW Semaphore Register */
#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
/* FW Semaphore Register */
#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */
#define E1000_FWSM_MODE_SHIFT 1
#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */
/* FFLT Debug Register */
#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */
typedef enum {
e1000_mng_mode_none = 0,
e1000_mng_mode_asf,
e1000_mng_mode_pt,
e1000_mng_mode_ipmi,
e1000_mng_mode_host_interface_only
} e1000_mng_mode;
/* Host Interface Control Register */
#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */
#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done
* to put command in RAM */
#define E1000_HICR_SV 0x00000004 /* Status Validity */
#define E1000_HICR_FWR 0x00000080 /* FW reset. Set by the Host */
/* Host Interface Command Interface - Address range 0x8800-0x8EFF */
#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */
#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */
#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */
#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */
struct e1000_host_command_header {
uint8_t command_id;
uint8_t command_length;
uint8_t command_options; /* I/F bits for command, status for return */
uint8_t checksum;
};
struct e1000_host_command_info {
struct e1000_host_command_header command_header; /* Command Header / Command Result Header is 4 bytes */
uint8_t command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can be 0..252 bytes long */
};
/* Host SMB register #0 */
#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */
#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */
#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */
#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */
/* Host SMB register #1 */
#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN
#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN
#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT
#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT
/* FW Status Register */
#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */
/* Wake Up Packet Length */
#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */
#define E1000_MDALIGN 4096
#define E1000_GCR_BEM32 0x00400000
/* Function Active and Power State to MNG */
#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003
#define E1000_FACTPS_LAN0_VALID 0x00000004
#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008
#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0
#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6
#define E1000_FACTPS_LAN1_VALID 0x00000100
#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200
#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000
#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12
#define E1000_FACTPS_IDE_ENABLE 0x00004000
#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000
#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000
#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18
#define E1000_FACTPS_SP_ENABLE 0x00100000
#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000
#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000
#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24
#define E1000_FACTPS_IPMI_ENABLE 0x04000000
#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000
#define E1000_FACTPS_MNGCG 0x20000000
#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000
#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000
/* EEPROM Commands - Microwire */
#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */
#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */
@ -1477,22 +1960,20 @@ struct e1000_hw {
/* EEPROM Commands - SPI */
#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
#define EEPROM_READ_OPCODE_SPI 0x3 /* EEPROM read opcode */
#define EEPROM_WRITE_OPCODE_SPI 0x2 /* EEPROM write opcode */
#define EEPROM_A8_OPCODE_SPI 0x8 /* opcode bit-3 = address bit-8 */
#define EEPROM_WREN_OPCODE_SPI 0x6 /* EEPROM set Write Enable latch */
#define EEPROM_WRDI_OPCODE_SPI 0x4 /* EEPROM reset Write Enable latch */
#define EEPROM_RDSR_OPCODE_SPI 0x5 /* EEPROM read Status register */
#define EEPROM_WRSR_OPCODE_SPI 0x1 /* EEPROM write Status register */
#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */
#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */
#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */
#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */
#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
/* EEPROM Size definitions */
#define EEPROM_SIZE_16KB 0x1800
#define EEPROM_SIZE_8KB 0x1400
#define EEPROM_SIZE_4KB 0x1000
#define EEPROM_SIZE_2KB 0x0C00
#define EEPROM_SIZE_1KB 0x0800
#define EEPROM_SIZE_512B 0x0400
#define EEPROM_SIZE_128B 0x0000
#define EEPROM_WORD_SIZE_SHIFT 6
#define EEPROM_SIZE_SHIFT 10
#define EEPROM_SIZE_MASK 0x1C00
/* EEPROM Word Offsets */
@ -1606,7 +2087,22 @@ struct e1000_hw {
#define IFS_MIN 40
#define IFS_RATIO 4
/* Extended Configuration Control and Size */
#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001
#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002
#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004
#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008
#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010
#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040
#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x1FFF0000
#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF
#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00
#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000
/* PBA constants */
#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */
#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
#define E1000_PBA_22K 0x0016
#define E1000_PBA_24K 0x0018
@ -1663,6 +2159,13 @@ struct e1000_hw {
/* Number of milliseconds we wait for auto-negotiation to complete */
#define LINK_UP_TIMEOUT 500
/* Number of 100 microseconds we wait for PCI Express master disable */
#define MASTER_DISABLE_TIMEOUT 800
/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */
#define AUTO_READ_DONE_TIMEOUT 10
/* Number of milliseconds we wait for PHY configuration done after MAC reset */
#define PHY_CFG_TIMEOUT 40
#define E1000_TX_BUFFER_SIZE ((uint32_t)1514)
/* The carrier extension symbol, as received by the NIC. */
@ -1763,6 +2266,7 @@ struct e1000_hw {
#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */
#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */
#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */
#define IGP02E1000_PHY_POWER_MGMT 0x19
#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */
/* IGP01E1000 AGC Registers - stores the cable length values*/
@ -1771,12 +2275,20 @@ struct e1000_hw {
#define IGP01E1000_PHY_AGC_C 0x1472
#define IGP01E1000_PHY_AGC_D 0x1872
/* IGP02E1000 AGC Registers for cable length values */
#define IGP02E1000_PHY_AGC_A 0x11B1
#define IGP02E1000_PHY_AGC_B 0x12B1
#define IGP02E1000_PHY_AGC_C 0x14B1
#define IGP02E1000_PHY_AGC_D 0x18B1
/* IGP01E1000 DSP Reset Register */
#define IGP01E1000_PHY_DSP_RESET 0x1F33
#define IGP01E1000_PHY_DSP_SET 0x1F71
#define IGP01E1000_PHY_DSP_FFE 0x1F35
#define IGP01E1000_PHY_CHANNEL_NUM 4
#define IGP02E1000_PHY_CHANNEL_NUM 4
#define IGP01E1000_PHY_AGC_PARAM_A 0x1171
#define IGP01E1000_PHY_AGC_PARAM_B 0x1271
#define IGP01E1000_PHY_AGC_PARAM_C 0x1471
@ -2060,20 +2572,30 @@ struct e1000_hw {
#define IGP01E1000_MSE_CHANNEL_B 0x0F00
#define IGP01E1000_MSE_CHANNEL_A 0xF000
#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */
#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */
/* IGP01E1000 DSP reset macros */
#define DSP_RESET_ENABLE 0x0
#define DSP_RESET_DISABLE 0x2
#define E1000_MAX_DSP_RESETS 10
/* IGP01E1000 AGC Registers */
/* IGP01E1000 & IGP02E1000 AGC Registers */
#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */
#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */
/* IGP02E1000 AGC Register Length 9-bit mask */
#define IGP02E1000_AGC_LENGTH_MASK 0x7F
/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */
#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128
#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 128
/* The precision of the length is +/- 10 meters */
/* The precision error of the cable length is +/- 10 meters */
#define IGP01E1000_AGC_RANGE 10
#define IGP02E1000_AGC_RANGE 10
/* IGP01E1000 PCS Initialization register */
/* bits 3:6 in the PCS registers stores the channels polarity */
@ -2113,6 +2635,8 @@ struct e1000_hw {
#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID
#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
#define M88E1011_I_REV_4 0x04
#define M88E1111_I_PHY_ID 0x01410CC0
#define L1LXT971A_PHY_ID 0x001378E0
/* Miscellaneous PHY bit definitions. */
#define PHY_PREAMBLE 0xFFFFFFFF

File diff suppressed because it is too large

View file

@ -1,7 +1,7 @@
/*******************************************************************************
Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@ -42,7 +42,12 @@
#include <linux/sched.h>
#ifndef msec_delay
#define msec_delay(x) msleep(x)
#define msec_delay(x) do { if(in_interrupt()) { \
/* Don't mdelay in interrupt context! */ \
BUG(); \
} else { \
msleep(x); \
} } while(0)
/* Some workarounds require millisecond delays and are run during interrupt
* context. Most notably, when establishing link, the phy may need tweaking
@ -96,6 +101,29 @@ typedef enum {
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
((offset) << 2)))
#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
writew((value), ((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
((offset) << 1))))
#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
readw((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
((offset) << 1)))
#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
writeb((value), ((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
(offset))))
#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
readb((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
(offset)))
#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS)
#endif /* _E1000_OSDEP_H_ */
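
As an aside, the word and byte array accessors added above differ from the dword form only in how they scale the element index into a byte offset: dword arrays use (offset << 2), word arrays (offset << 1), and byte arrays the raw offset. A minimal, self-contained sketch of that arithmetic (illustrative only, not part of the patch; the helper name is made up):

#include <stdint.h>

/* Illustrative only: mirror the index-to-byte-offset scaling used by the
 * E1000_*_REG_ARRAY_{DWORD,WORD,BYTE} macros above. */
static inline uintptr_t reg_array_offset(uintptr_t reg_base, unsigned int index,
                                         unsigned int elem_bytes)
{
        switch (elem_bytes) {
        case 4:
                return reg_base + (index << 2);  /* 32-bit array element */
        case 2:
                return reg_base + (index << 1);  /* 16-bit array element */
        default:
                return reg_base + index;         /* byte array element */
        }
}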

View file

@ -1,7 +1,7 @@
/*******************************************************************************
Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@ -478,7 +478,6 @@ e1000_check_options(struct e1000_adapter *adapter)
DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
opt.name);
break;
case -1:
default:
e1000_validate_option(&adapter->itr, &opt,
adapter);

View file

@ -81,6 +81,7 @@
* cause DMA to kfree'd memory.
* 0.31: 14 Nov 2004: ethtool support for getting/setting link
* capabilities.
* 0.32: 16 Apr 2005: RX_ERROR4 handling added.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
@ -92,7 +93,7 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
#define FORCEDETH_VERSION "0.31"
#define FORCEDETH_VERSION "0.32"
#define DRV_NAME "forcedeth"
#include <linux/module.h>
@ -109,6 +110,7 @@
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <asm/irq.h>
#include <asm/io.h>
@ -1013,6 +1015,59 @@ static void nv_tx_timeout(struct net_device *dev)
spin_unlock_irq(&np->lock);
}
/*
* Called when the nic notices a mismatch between the actual data len on the
* wire and the len indicated in the 802 header
*/
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
int hdrlen; /* length of the 802 header */
int protolen; /* length as stored in the proto field */
/* 1) calculate len according to header */
if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
hdrlen = VLAN_HLEN;
} else {
protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
hdrlen = ETH_HLEN;
}
dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
dev->name, datalen, protolen, hdrlen);
if (protolen > ETH_DATA_LEN)
return datalen; /* Value in proto field not a len, no checks possible */
protolen += hdrlen;
/* consistency checks: */
if (datalen > ETH_ZLEN) {
if (datalen >= protolen) {
/* more data on wire than in 802 header, trim off
* additional data.
*/
dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
dev->name, protolen);
return protolen;
} else {
/* less data on wire than mentioned in header.
* Discard the packet.
*/
dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
dev->name);
return -1;
}
} else {
/* short packet. Accept only if 802 values are also short */
if (protolen > ETH_ZLEN) {
dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
dev->name);
return -1;
}
dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
dev->name, datalen);
return datalen;
}
}
static void nv_rx_process(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
@ -1064,7 +1119,7 @@ static void nv_rx_process(struct net_device *dev)
np->stats.rx_errors++;
goto next_pkt;
}
if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4)) {
if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
np->stats.rx_errors++;
goto next_pkt;
}
@ -1078,22 +1133,24 @@ static void nv_rx_process(struct net_device *dev)
np->stats.rx_errors++;
goto next_pkt;
}
if (Flags & NV_RX_ERROR) {
/* framing errors are soft errors, the rest is fatal. */
if (Flags & NV_RX_FRAMINGERR) {
if (Flags & NV_RX_SUBSTRACT1) {
len--;
}
} else {
if (Flags & NV_RX_ERROR4) {
len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
if (len < 0) {
np->stats.rx_errors++;
goto next_pkt;
}
}
/* framing errors are soft errors. */
if (Flags & NV_RX_FRAMINGERR) {
if (Flags & NV_RX_SUBSTRACT1) {
len--;
}
}
} else {
if (!(Flags & NV_RX2_DESCRIPTORVALID))
goto next_pkt;
if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4)) {
if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
np->stats.rx_errors++;
goto next_pkt;
}
@ -1107,17 +1164,19 @@ static void nv_rx_process(struct net_device *dev)
np->stats.rx_errors++;
goto next_pkt;
}
if (Flags & NV_RX2_ERROR) {
/* framing errors are soft errors, the rest is fatal. */
if (Flags & NV_RX2_FRAMINGERR) {
if (Flags & NV_RX2_SUBSTRACT1) {
len--;
}
} else {
if (Flags & NV_RX2_ERROR4) {
len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
if (len < 0) {
np->stats.rx_errors++;
goto next_pkt;
}
}
/* framing errors are soft errors */
if (Flags & NV_RX2_FRAMINGERR) {
if (Flags & NV_RX2_SUBSTRACT1) {
len--;
}
}
Flags &= NV_RX2_CHECKSUMMASK;
if (Flags == NV_RX2_CHECKSUMOK1 ||
Flags == NV_RX2_CHECKSUMOK2 ||
@ -1480,6 +1539,13 @@ static void nv_do_nic_poll(unsigned long data)
enable_irq(dev->irq);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
nv_do_nic_poll((unsigned long) dev);
}
#endif
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct fe_priv *np = get_nvpriv(dev);
@ -1962,6 +2028,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev->get_stats = nv_get_stats;
dev->change_mtu = nv_change_mtu;
dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = nv_poll_controller;
#endif
SET_ETHTOOL_OPS(dev, &ops);
dev->tx_timeout = nv_tx_timeout;
dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

View file

@ -924,7 +924,7 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
spin_lock_irqsave(&cnx->lock, flags);
if (! cnx->state & VETH_STATE_READY)
if (! (cnx->state & VETH_STATE_READY))
goto drop;
if ((skb->len - 14) > VETH_MAX_MTU)
@ -1023,6 +1023,8 @@ static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev)
lpmask = veth_transmit_to_many(skb, lpmask, dev);
dev->trans_start = jiffies;
if (! lpmask) {
dev_kfree_skb(skb);
} else {
@ -1262,13 +1264,18 @@ static void veth_receive(struct veth_lpar_connection *cnx,
vlan = skb->data[9];
dev = veth_dev[vlan];
if (! dev)
/* Some earlier versions of the driver sent
broadcasts down all connections, even to
lpars that weren't on the relevant vlan.
So ignore packets belonging to a vlan we're
not on. */
if (! dev) {
/*
* Some earlier versions of the driver sent
* broadcasts down all connections, even to lpars
* that weren't on the relevant vlan. So ignore
* packets belonging to a vlan we're not on.
* We can also be here if we receive packets while
* the driver is going down, because then dev is NULL.
*/
dev_kfree_skb_irq(skb);
continue;
}
port = (struct veth_port *)dev->priv;
dest = *((u64 *) skb->data) & 0xFFFFFFFFFFFF0000;
@ -1381,18 +1388,25 @@ void __exit veth_module_cleanup(void)
{
int i;
vio_unregister_driver(&veth_driver);
/* Stop the queues first to stop any new packets being sent. */
for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++)
if (veth_dev[i])
netif_stop_queue(veth_dev[i]);
/* Stop the connections before we unregister the driver. This
* ensures there's no skbs lying around holding the device open. */
for (i = 0; i < HVMAXARCHITECTEDLPS; ++i)
veth_stop_connection(i);
HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan);
/* Hypervisor callbacks may have scheduled more work while we
* were destroying connections. Now that we've disconnected from
 * were stopping connections. Now that we've disconnected from
* the hypervisor make sure everything's finished. */
flush_scheduled_work();
vio_unregister_driver(&veth_driver);
for (i = 0; i < HVMAXARCHITECTEDLPS; ++i)
veth_destroy_connection(i);

View file

@ -110,7 +110,7 @@ struct ixgb_adapter;
#define IXGB_TX_QUEUE_WAKE 16
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define IXGB_RX_BUFFER_WRITE 4 /* Must be power of 2 */
/* only works for sizes that are powers of 2 */
#define IXGB_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))

View file

@ -411,7 +411,7 @@ ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data)
ixgb_cleanup_eeprom(hw);
/* clear the init_ctrl_reg_1 to signify that the cache is invalidated */
ee_map->init_ctrl_reg_1 = EEPROM_ICW1_SIGNATURE_CLEAR;
ee_map->init_ctrl_reg_1 = le16_to_cpu(EEPROM_ICW1_SIGNATURE_CLEAR);
return;
}
@ -483,7 +483,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
DEBUGOUT("ixgb_ee: Checksum invalid.\n");
/* clear the init_ctrl_reg_1 to signify that the cache is
* invalidated */
ee_map->init_ctrl_reg_1 = EEPROM_ICW1_SIGNATURE_CLEAR;
ee_map->init_ctrl_reg_1 = le16_to_cpu(EEPROM_ICW1_SIGNATURE_CLEAR);
return (FALSE);
}
@ -579,7 +579,7 @@ ixgb_get_ee_compatibility(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->compatibility);
return (le16_to_cpu(ee_map->compatibility));
return(0);
}
@ -616,7 +616,7 @@ ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->init_ctrl_reg_1);
return (le16_to_cpu(ee_map->init_ctrl_reg_1));
return(0);
}
@ -635,7 +635,7 @@ ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->init_ctrl_reg_2);
return (le16_to_cpu(ee_map->init_ctrl_reg_2));
return(0);
}
@ -654,7 +654,7 @@ ixgb_get_ee_subsystem_id(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->subsystem_id);
return (le16_to_cpu(ee_map->subsystem_id));
return(0);
}
@ -673,7 +673,7 @@ ixgb_get_ee_subvendor_id(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->subvendor_id);
return (le16_to_cpu(ee_map->subvendor_id));
return(0);
}
@ -692,7 +692,7 @@ ixgb_get_ee_device_id(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->device_id);
return (le16_to_cpu(ee_map->device_id));
return(0);
}
@ -711,7 +711,7 @@ ixgb_get_ee_vendor_id(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->vendor_id);
return (le16_to_cpu(ee_map->vendor_id));
return(0);
}
@ -730,7 +730,7 @@ ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->swdpins_reg);
return (le16_to_cpu(ee_map->swdpins_reg));
return(0);
}
@ -749,7 +749,7 @@ ixgb_get_ee_d3_power(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->d3_power);
return (le16_to_cpu(ee_map->d3_power));
return(0);
}
@ -768,7 +768,7 @@ ixgb_get_ee_d0_power(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->d0_power);
return (le16_to_cpu(ee_map->d0_power));
return(0);
}
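
The le16_to_cpu() conversions introduced throughout this hunk exist because the ixgb EEPROM map fields are stored little-endian, so each 16-bit value must be byte-swapped before use on a big-endian host (on little-endian machines the conversion is a no-op). A minimal user-space sketch of the same idea (illustrative only, not the kernel helper):

#include <stdint.h>

/* Illustrative only: read a 16-bit little-endian field from a raw EEPROM
 * image into host byte order, regardless of the host's own endianness. */
static inline uint16_t read_le16(const uint8_t *p)
{
        /* p[0] is the least significant byte in little-endian storage. */
        return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}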

View file

@ -252,7 +252,9 @@ ixgb_get_regs(struct net_device *netdev,
uint32_t *reg_start = reg;
uint8_t i;
regs->version = (adapter->hw.device_id << 16) | adapter->hw.subsystem_id;
/* the 1 (one) below indicates an attempt at versioning, if the
 * interface in ethtool or the driver changes, this 1 should be incremented */
regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id;
/* General Registers */
*reg++ = IXGB_READ_REG(hw, CTRL0); /* 0 */

View file

@ -47,7 +47,7 @@ char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
char ixgb_driver_version[] = "1.0.90-k2"DRIVERNAPI;
char ixgb_driver_version[] = "1.0.95-k2"DRIVERNAPI;
char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
/* ixgb_pci_tbl - PCI Device ID Table
@ -103,6 +103,7 @@ static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs);
static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
#ifdef CONFIG_IXGB_NAPI
static int ixgb_clean(struct net_device *netdev, int *budget);
static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
@ -120,33 +121,20 @@ static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
static int ixgb_notify_reboot(struct notifier_block *, unsigned long event,
void *ptr);
static int ixgb_suspend(struct pci_dev *pdev, uint32_t state);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void ixgb_netpoll(struct net_device *dev);
#endif
struct notifier_block ixgb_notifier_reboot = {
.notifier_call = ixgb_notify_reboot,
.next = NULL,
.priority = 0
};
/* Exported from other modules */
extern void ixgb_check_options(struct ixgb_adapter *adapter);
static struct pci_driver ixgb_driver = {
.name = ixgb_driver_name,
.name = ixgb_driver_name,
.id_table = ixgb_pci_tbl,
.probe = ixgb_probe,
.remove = __devexit_p(ixgb_remove),
/* Power Managment Hooks */
.suspend = NULL,
.resume = NULL
.probe = ixgb_probe,
.remove = __devexit_p(ixgb_remove),
};
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@ -169,17 +157,12 @@ MODULE_LICENSE("GPL");
static int __init
ixgb_init_module(void)
{
int ret;
printk(KERN_INFO "%s - version %s\n",
ixgb_driver_string, ixgb_driver_version);
printk(KERN_INFO "%s\n", ixgb_copyright);
ret = pci_module_init(&ixgb_driver);
if(ret >= 0) {
register_reboot_notifier(&ixgb_notifier_reboot);
}
return ret;
return pci_module_init(&ixgb_driver);
}
module_init(ixgb_init_module);
@ -194,7 +177,6 @@ module_init(ixgb_init_module);
static void __exit
ixgb_exit_module(void)
{
unregister_reboot_notifier(&ixgb_notifier_reboot);
pci_unregister_driver(&ixgb_driver);
}
@ -224,8 +206,8 @@ ixgb_irq_enable(struct ixgb_adapter *adapter)
{
if(atomic_dec_and_test(&adapter->irq_sem)) {
IXGB_WRITE_REG(&adapter->hw, IMS,
IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
IXGB_INT_RXO | IXGB_INT_LSC);
IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
IXGB_INT_LSC);
IXGB_WRITE_FLUSH(&adapter->hw);
}
}
@ -1209,10 +1191,10 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
| IXGB_CONTEXT_DESC_CMD_TSE
| IXGB_CONTEXT_DESC_CMD_IP
| IXGB_CONTEXT_DESC_CMD_TCP
| IXGB_CONTEXT_DESC_CMD_RS
| IXGB_CONTEXT_DESC_CMD_IDE
| (skb->len - (hdr_len)));
if(++i == adapter->tx_ring.count) i = 0;
adapter->tx_ring.next_to_use = i;
@ -1247,8 +1229,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
context_desc->mss = 0;
context_desc->cmd_type_len =
cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
| IXGB_TX_DESC_CMD_RS
| IXGB_TX_DESC_CMD_IDE);
| IXGB_TX_DESC_CMD_IDE);
if(++i == adapter->tx_ring.count) i = 0;
adapter->tx_ring.next_to_use = i;
@ -1273,6 +1254,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int f;
len -= skb->data_len;
i = tx_ring->next_to_use;
@ -1526,14 +1508,33 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
void
ixgb_update_stats(struct ixgb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
u64 bcast = ((u64)bcast_h << 32) | bcast_l;
multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
/* fix up multicast stats by removing broadcasts */
multi -= bcast;
adapter->stats.mprcl += (multi & 0xFFFFFFFF);
adapter->stats.mprch += (multi >> 32);
adapter->stats.bprcl += bcast_l;
adapter->stats.bprch += bcast_h;
} else {
adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
}
adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
@ -1823,7 +1824,6 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
struct ixgb_rx_desc *rx_desc, *next_rxd;
struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
struct sk_buff *skb, *next_skb;
uint32_t length;
unsigned int i, j;
boolean_t cleaned = FALSE;
@ -1833,6 +1833,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
buffer_info = &rx_ring->buffer_info[i];
while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
struct sk_buff *skb, *next_skb;
u8 status;
#ifdef CONFIG_IXGB_NAPI
if(*work_done >= work_to_do)
@ -1840,7 +1842,9 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
(*work_done)++;
#endif
status = rx_desc->status;
skb = buffer_info->skb;
prefetch(skb->data);
if(++i == rx_ring->count) i = 0;
@ -1855,7 +1859,6 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
next_skb = next_buffer->skb;
prefetch(next_skb);
cleaned = TRUE;
pci_unmap_single(pdev,
@ -1865,7 +1868,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
length = le16_to_cpu(rx_desc->length);
if(unlikely(!(rx_desc->status & IXGB_RX_DESC_STATUS_EOP))) {
if(unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
/* All receives must fit into a single buffer */
@ -1873,12 +1876,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
"length<%x>\n", length);
dev_kfree_skb_irq(skb);
rx_desc->status = 0;
buffer_info->skb = NULL;
rx_desc = next_rxd;
buffer_info = next_buffer;
continue;
goto rxdesc_done;
}
if (unlikely(rx_desc->errors
@ -1887,12 +1885,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
IXGB_RX_DESC_ERRORS_RXE))) {
dev_kfree_skb_irq(skb);
rx_desc->status = 0;
buffer_info->skb = NULL;
rx_desc = next_rxd;
buffer_info = next_buffer;
continue;
goto rxdesc_done;
}
/* Good Receive */
@ -1903,7 +1896,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_IXGB_NAPI
if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
le16_to_cpu(rx_desc->special) &
IXGB_RX_DESC_SPECIAL_VLAN_MASK);
@ -1911,7 +1904,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
netif_receive_skb(skb);
}
#else /* CONFIG_IXGB_NAPI */
if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
vlan_hwaccel_rx(skb, adapter->vlgrp,
le16_to_cpu(rx_desc->special) &
IXGB_RX_DESC_SPECIAL_VLAN_MASK);
@ -1921,9 +1914,12 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
#endif /* CONFIG_IXGB_NAPI */
netdev->last_rx = jiffies;
rxdesc_done:
/* clean up descriptor, might be written over by hw */
rx_desc->status = 0;
buffer_info->skb = NULL;
/* use prefetched values */
rx_desc = next_rxd;
buffer_info = next_buffer;
}
@ -1959,8 +1955,8 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
num_group_tail_writes = IXGB_RX_BUFFER_WRITE;
/* leave one descriptor unused */
while(--cleancount > 0) {
/* leave three descriptors unused */
while(--cleancount > 2) {
rx_desc = IXGB_RX_DESC(*rx_ring, i);
skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
@ -1987,6 +1983,10 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
PCI_DMA_FROMDEVICE);
rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
/* guarantee DD bit not set now before h/w gets descriptor
* this is the rest of the workaround for h/w double
* writeback. */
rx_desc->status = 0;
if((i & ~(num_group_tail_writes- 1)) == i) {
/* Force memory writes to complete before letting h/w
@ -2099,54 +2099,6 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
}
}
/**
* ixgb_notify_reboot - handles OS notification of reboot event.
* @param nb notifier block, unused
* @param event Event being passed to driver to act upon
* @param p A pointer to our net device
**/
static int
ixgb_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
{
struct pci_dev *pdev = NULL;
switch(event) {
case SYS_DOWN:
case SYS_HALT:
case SYS_POWER_OFF:
while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
if (pci_dev_driver(pdev) == &ixgb_driver)
ixgb_suspend(pdev, 3);
}
}
return NOTIFY_DONE;
}
/**
* ixgb_suspend - driver suspend function called from notify.
* @param pdev pci driver structure used for passing to
* @param state power state to enter
**/
static int
ixgb_suspend(struct pci_dev *pdev, uint32_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgb_adapter *adapter = netdev->priv;
netif_device_detach(netdev);
if(netif_running(netdev))
ixgb_down(adapter, TRUE);
pci_save_state(pdev);
state = (state > 0) ? 3 : 0;
pci_set_power_state(pdev, state);
msec_delay(200);
return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
@ -2157,6 +2109,7 @@ ixgb_suspend(struct pci_dev *pdev, uint32_t state)
static void ixgb_netpoll(struct net_device *dev)
{
struct ixgb_adapter *adapter = dev->priv;
disable_irq(adapter->pdev->irq);
ixgb_intr(adapter->pdev->irq, dev, NULL);
enable_irq(adapter->pdev->irq);

View file

@ -45,8 +45,7 @@
/* Don't mdelay in interrupt context! */ \
BUG(); \
} else { \
set_current_state(TASK_UNINTERRUPTIBLE); \
schedule_timeout((x * HZ)/1000 + 2); \
msleep(x); \
} } while(0)
#endif

View file

@ -2433,9 +2433,9 @@ static void __set_rx_mode(struct net_device *dev)
rx_mode = RxFilterEnable | AcceptBroadcast
| AcceptMulticast | AcceptMyPhys;
for (i = 0; i < 64; i += 2) {
writew(HASH_TABLE + i, ioaddr + RxFilterAddr);
writew((mc_filter[i+1]<<8) + mc_filter[i],
ioaddr + RxFilterData);
writel(HASH_TABLE + i, ioaddr + RxFilterAddr);
writel((mc_filter[i + 1] << 8) + mc_filter[i],
ioaddr + RxFilterData);
}
}
writel(rx_mode, ioaddr + RxFilterAddr);

View file

@ -1,4 +1,4 @@
#define _VERSION "0.20"
#define VERSION "0.22"
/* ns83820.c by Benjamin LaHaise with contributions.
*
* Questions/comments/discussion to linux-ns83820@kvack.org.
@ -63,9 +63,11 @@
* - fix missed txok introduced during performance
* tuning
* 0.20 - fix stupid RFEN thinko. i am such a smurf.
*
 * 20040828	0.21 - add hardware vlan acceleration
* by Neil Horman <nhorman@redhat.com>
* 20050406 0.22 - improved DAC ifdefs from Andi Kleen
* - removal of dead code from Adrian Bunk
* - fix half duplex collision behaviour
* Driver Overview
* ===============
*
@ -129,18 +131,6 @@ static int lnksts = 0; /* CFG_LNKSTS bit polarity */
#undef Dprintk
#define Dprintk dprintk
#if defined(CONFIG_HIGHMEM64G) || defined(__ia64__)
#define USE_64BIT_ADDR "+"
#endif
#if defined(USE_64BIT_ADDR)
#define VERSION _VERSION USE_64BIT_ADDR
#define TRY_DAC 1
#else
#define VERSION _VERSION
#define TRY_DAC 0
#endif
/* tunables */
#define RX_BUF_SIZE 1500 /* 8192 */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
@ -386,22 +376,16 @@ static int lnksts = 0; /* CFG_LNKSTS bit polarity */
#define LINK_DOWN 0x02
#define LINK_UP 0x04
#ifdef USE_64BIT_ADDR
#define HW_ADDR_LEN 8
#define HW_ADDR_LEN sizeof(dma_addr_t)
#define desc_addr_set(desc, addr) \
do { \
u64 __addr = (addr); \
(desc)[0] = cpu_to_le32(__addr); \
(desc)[1] = cpu_to_le32(__addr >> 32); \
((desc)[0] = cpu_to_le32(addr)); \
if (HW_ADDR_LEN == 8) \
(desc)[1] = cpu_to_le32(((u64)addr) >> 32); \
} while(0)
#define desc_addr_get(desc) \
(((u64)le32_to_cpu((desc)[1]) << 32) \
| le32_to_cpu((desc)[0]))
#else
#define HW_ADDR_LEN 4
#define desc_addr_set(desc, addr) ((desc)[0] = cpu_to_le32(addr))
#define desc_addr_get(desc) (le32_to_cpu((desc)[0]))
#endif
(le32_to_cpu((desc)[0]) | \
(HW_ADDR_LEN == 8 ? ((dma_addr_t)le32_to_cpu((desc)[1]))<<32 : 0))
#define DESC_LINK 0
#define DESC_BUFPTR (DESC_LINK + HW_ADDR_LEN/4)
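
The reworked desc_addr_set/desc_addr_get macros above split a dma_addr_t across one or two little-endian 32-bit descriptor words, depending on whether the address type is 4 or 8 bytes wide. A minimal sketch of the same packing in plain C (illustrative only; it omits the cpu_to_le32/le32_to_cpu conversions the driver also applies):

#include <stdint.h>

/* Illustrative only: store a 64-bit DMA address as two 32-bit descriptor
 * words, low word first, and reassemble it on the way back out. */
static inline void desc_store_addr(uint32_t desc[2], uint64_t addr)
{
        desc[0] = (uint32_t)addr;          /* low 32 bits */
        desc[1] = (uint32_t)(addr >> 32);  /* high 32 bits */
}

static inline uint64_t desc_load_addr(const uint32_t desc[2])
{
        return (uint64_t)desc[0] | ((uint64_t)desc[1] << 32);
}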
@ -727,11 +711,23 @@ static void fastcall phy_intr(struct net_device *ndev)
speed = ((cfg / CFG_SPDSTS0) & 3);
fullduplex = (cfg & CFG_DUPSTS);
if (fullduplex)
if (fullduplex) {
new_cfg |= CFG_SB;
writel(readl(dev->base + TXCFG)
| TXCFG_CSI | TXCFG_HBI,
dev->base + TXCFG);
writel(readl(dev->base + RXCFG) | RXCFG_RX_FD,
dev->base + RXCFG);
} else {
writel(readl(dev->base + TXCFG)
& ~(TXCFG_CSI | TXCFG_HBI),
dev->base + TXCFG);
writel(readl(dev->base + RXCFG) & ~(RXCFG_RX_FD),
dev->base + RXCFG);
}
if ((cfg & CFG_LNKSTS) &&
((new_cfg ^ dev->CFG_cache) & CFG_MODE_1000)) {
((new_cfg ^ dev->CFG_cache) != 0)) {
writel(new_cfg, dev->base + CFG);
dev->CFG_cache = new_cfg;
}
@ -1189,7 +1185,6 @@ again:
for (;;) {
volatile u32 *desc = dev->tx_descs + (free_idx * DESC_SIZE);
u32 residue = 0;
dprintk("frag[%3u]: %4u @ 0x%08Lx\n", free_idx, len,
(unsigned long long)buf);
@ -1199,17 +1194,11 @@ again:
desc_addr_set(desc + DESC_BUFPTR, buf);
desc[DESC_EXTSTS] = cpu_to_le32(extsts);
cmdsts = ((nr_frags|residue) ? CMDSTS_MORE : do_intr ? CMDSTS_INTR : 0);
cmdsts = ((nr_frags) ? CMDSTS_MORE : do_intr ? CMDSTS_INTR : 0);
cmdsts |= (desc == first_desc) ? 0 : CMDSTS_OWN;
cmdsts |= len;
desc[DESC_CMDSTS] = cpu_to_le32(cmdsts);
if (residue) {
buf += len;
len = residue;
continue;
}
if (!nr_frags)
break;
@ -1841,7 +1830,8 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
int using_dac = 0;
/* See if we can set the dma mask early on; failure is fatal. */
if (TRY_DAC && !pci_set_dma_mask(pci_dev, 0xffffffffffffffffULL)) {
if (sizeof(dma_addr_t) == 8 &&
!pci_set_dma_mask(pci_dev, 0xffffffffffffffffULL)) {
using_dac = 1;
} else if (!pci_set_dma_mask(pci_dev, 0xffffffff)) {
using_dac = 0;
@ -1972,9 +1962,8 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
/* When compiled with 64 bit addressing, we must always enable
* the 64 bit descriptor format.
*/
#ifdef USE_64BIT_ADDR
dev->CFG_cache |= CFG_M64ADDR;
#endif
if (sizeof(dma_addr_t) == 8)
dev->CFG_cache |= CFG_M64ADDR;
if (using_dac)
dev->CFG_cache |= CFG_T64ADDR;

View file

@ -22,8 +22,8 @@
*************************************************************************/
#define DRV_NAME "pcnet32"
#define DRV_VERSION "1.30i"
#define DRV_RELDATE "06.28.2004"
#define DRV_VERSION "1.30j"
#define DRV_RELDATE "29.04.2005"
#define PFX DRV_NAME ": "
static const char *version =
@ -256,6 +256,7 @@ static int homepna[MAX_UNITS];
* homepna for selecting HomePNA mode for PCNet/Home 79C978.
* v1.30h 24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32.
* v1.30i 28 Jun 2004 Don Fry change to use module_param.
* v1.30j 29 Apr 2005 Don Fry fix skb/map leak with loopback test.
*/
@ -395,6 +396,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev);
static int pcnet32_get_regs_len(struct net_device *dev);
static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *ptr);
static void pcnet32_purge_tx_ring(struct net_device *dev);
enum pci_flags_bit {
PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
@ -785,6 +787,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1)
}
clean_up:
pcnet32_purge_tx_ring(dev);
x = a->read_csr(ioaddr, 15) & 0xFFFF;
a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */

View file

@ -162,6 +162,7 @@ struct sis900_private {
struct mii_phy * mii;
struct mii_phy * first_mii; /* record the first mii structure */
unsigned int cur_phy;
struct mii_if_info mii_info;
struct timer_list timer; /* Link status detection timer. */
u8 autong_complete; /* 1: auto-negotiate complete */
@ -203,7 +204,7 @@ static int sis900_open(struct net_device *net_dev);
static int sis900_mii_probe (struct net_device * net_dev);
static void sis900_init_rxfilter (struct net_device * net_dev);
static u16 read_eeprom(long ioaddr, int location);
static u16 mdio_read(struct net_device *net_dev, int phy_id, int location);
static int mdio_read(struct net_device *net_dev, int phy_id, int location);
static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val);
static void sis900_timer(unsigned long data);
static void sis900_check_mode (struct net_device *net_dev, struct mii_phy *mii_phy);
@ -478,7 +479,13 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
sis_priv->msg_enable = sis900_debug;
else
sis_priv->msg_enable = SIS900_DEF_MSG;
sis_priv->mii_info.dev = net_dev;
sis_priv->mii_info.mdio_read = mdio_read;
sis_priv->mii_info.mdio_write = mdio_write;
sis_priv->mii_info.phy_id_mask = 0x1f;
sis_priv->mii_info.reg_num_mask = 0x1f;
/* Get Mac address according to the chip revision */
pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &(sis_priv->chipset_rev));
if(netif_msg_probe(sis_priv))
@ -725,6 +732,8 @@ static u16 sis900_default_phy(struct net_device * net_dev)
pci_name(sis_priv->pci_dev), sis_priv->cur_phy);
}
sis_priv->mii_info.phy_id = sis_priv->cur_phy;
status = mdio_read(net_dev, sis_priv->cur_phy, MII_CONTROL);
status &= (~MII_CNTL_ISOLATE);
@ -852,7 +861,7 @@ static void mdio_reset(long mdio_addr)
* Please see SiS7014 or ICS spec
*/
static u16 mdio_read(struct net_device *net_dev, int phy_id, int location)
static int mdio_read(struct net_device *net_dev, int phy_id, int location)
{
long mdio_addr = net_dev->base_addr + mear;
int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
@ -1966,10 +1975,47 @@ static void sis900_set_msglevel(struct net_device *net_dev, u32 value)
sis_priv->msg_enable = value;
}
static u32 sis900_get_link(struct net_device *net_dev)
{
struct sis900_private *sis_priv = net_dev->priv;
return mii_link_ok(&sis_priv->mii_info);
}
static int sis900_get_settings(struct net_device *net_dev,
struct ethtool_cmd *cmd)
{
struct sis900_private *sis_priv = net_dev->priv;
spin_lock_irq(&sis_priv->lock);
mii_ethtool_gset(&sis_priv->mii_info, cmd);
spin_unlock_irq(&sis_priv->lock);
return 0;
}
static int sis900_set_settings(struct net_device *net_dev,
struct ethtool_cmd *cmd)
{
struct sis900_private *sis_priv = net_dev->priv;
int rt;
spin_lock_irq(&sis_priv->lock);
rt = mii_ethtool_sset(&sis_priv->mii_info, cmd);
spin_unlock_irq(&sis_priv->lock);
return rt;
}
static int sis900_nway_reset(struct net_device *net_dev)
{
struct sis900_private *sis_priv = net_dev->priv;
return mii_nway_restart(&sis_priv->mii_info);
}
static struct ethtool_ops sis900_ethtool_ops = {
.get_drvinfo = sis900_get_drvinfo,
.get_msglevel = sis900_get_msglevel,
.set_msglevel = sis900_set_msglevel,
.get_link = sis900_get_link,
.get_settings = sis900_get_settings,
.set_settings = sis900_set_settings,
.nway_reset = sis900_nway_reset,
};
/**

View file

@ -193,6 +193,12 @@ static int aui[MAX_TLAN_BOARDS];
static int duplex[MAX_TLAN_BOARDS];
static int speed[MAX_TLAN_BOARDS];
static int boards_found;
module_param_array(aui, int, NULL, 0);
module_param_array(duplex, int, NULL, 0);
module_param_array(speed, int, NULL, 0);
MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
MODULE_PARM_DESC(duplex, "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");
MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
@ -204,8 +210,13 @@ MODULE_LICENSE("GPL");
/* Turn on debugging. See Documentation/networking/tlan.txt for details */
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
static int bbuf;
module_param(bbuf, int, 0);
MODULE_PARM_DESC(bbuf, "ThunderLAN use big buffer (0-1)");
static u8 *TLanPadBuffer;
static dma_addr_t TLanPadBufferDMA;
static char TLanSignature[] = "TLAN";
@ -2381,6 +2392,7 @@ TLan_FinishReset( struct net_device *dev )
TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET );
return;
}
TLan_SetMulticastList(dev);
} /* TLan_FinishReset */

View file

@ -174,6 +174,7 @@ void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val)
break;
}
spin_unlock_irqrestore(&tp->mii_lock, flags);
return;
}
/* Establish sync by sending 32 logic ones. */

View file

@ -754,7 +754,7 @@ typedef struct {
u8 zero;
u8 ssidLen;
u8 ssid[32];
u16 rssi;
u16 dBm;
#define CAP_ESS (1<<0)
#define CAP_IBSS (1<<1)
#define CAP_PRIVACY (1<<4)
@ -1125,6 +1125,9 @@ static int micsetup(struct airo_info *ai);
static int encapsulate(struct airo_info *ai, etherHead *pPacket, MICBuffer *buffer, int len);
static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *pPacket, u16 payLen);
static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi);
static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm);
#include <linux/crypto.h>
#endif
@ -1713,6 +1716,7 @@ static int readBSSListRid(struct airo_info *ai, int first,
list->fh.dwell = le16_to_cpu(list->fh.dwell);
list->dsChannel = le16_to_cpu(list->dsChannel);
list->atimWindow = le16_to_cpu(list->atimWindow);
list->dBm = le16_to_cpu(list->dBm);
return rc;
}
@ -3245,7 +3249,10 @@ badrx:
wstats.level = 0x100 - apriv->rssi[hdr.rssi[1]].rssidBm;
else
wstats.level = (hdr.rssi[1] + 321) / 2;
wstats.updated = 3;
wstats.noise = apriv->wstats.qual.noise;
wstats.updated = IW_QUAL_LEVEL_UPDATED
| IW_QUAL_QUAL_UPDATED
| IW_QUAL_NOISE_UPDATED;
/* Update spy records */
wireless_spy_update(dev, sa, &wstats);
}
@ -3588,7 +3595,10 @@ void mpi_receive_802_11 (struct airo_info *ai)
wstats.level = 0x100 - ai->rssi[hdr.rssi[1]].rssidBm;
else
wstats.level = (hdr.rssi[1] + 321) / 2;
wstats.updated = 3;
wstats.noise = ai->wstats.qual.noise;
wstats.updated = IW_QUAL_QUAL_UPDATED
| IW_QUAL_LEVEL_UPDATED
| IW_QUAL_NOISE_UPDATED;
/* Update spy records */
wireless_spy_update(ai->dev, sa, &wstats);
}
@ -3679,7 +3689,7 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
status = PC4500_readrid(ai,RID_RSSI,&rssi_rid,sizeof(rssi_rid),lock);
if ( status == SUCCESS ) {
if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL)
memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512);
memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
}
else {
if (ai->rssi) {
@ -5348,7 +5358,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
(int)BSSList_rid.bssid[5],
(int)BSSList_rid.ssidLen,
BSSList_rid.ssid,
(int)BSSList_rid.rssi);
(int)BSSList_rid.dBm);
ptr += sprintf(ptr, " channel = %d %s %s %s %s\n",
(int)BSSList_rid.dsChannel,
BSSList_rid.cap & CAP_ESS ? "ESS" : "",
@ -5593,6 +5603,29 @@ static void __exit airo_cleanup_module( void )
* would not work at all... - Jean II
*/
static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi)
{
if( !rssi_rid )
return 0;
return (0x100 - rssi_rid[rssi].rssidBm);
}
static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm)
{
int i;
if( !rssi_rid )
return 0;
for( i = 0; i < 256; i++ )
if (rssi_rid[i].rssidBm == dbm)
return rssi_rid[i].rssipct;
return 0;
}
static int airo_get_quality (StatusRid *status_rid, CapabilityRid *cap_rid)
{
int quality = 0;
@ -6443,12 +6476,30 @@ static int airo_get_range(struct net_device *dev,
}
range->num_frequency = k;
/* Hum... Should put the right values there */
range->max_qual.qual = airo_get_max_quality(&cap_rid);
range->max_qual.level = 0x100 - 120; /* -120 dBm */
range->max_qual.noise = 0;
range->sensitivity = 65535;
/* Hum... Should put the right values there */
if (local->rssi)
range->max_qual.qual = 100; /* % */
else
range->max_qual.qual = airo_get_max_quality(&cap_rid);
range->max_qual.level = 0; /* 0 means we use dBm */
range->max_qual.noise = 0;
range->max_qual.updated = 0;
/* Experimental measurements - boundary 11/5.5 Mb/s */
/* Note : with or without the (local->rssi), results
* are somewhat different. - Jean II */
if (local->rssi) {
range->avg_qual.qual = 50; /* % */
range->avg_qual.level = 186; /* -70 dBm */
} else {
range->avg_qual.qual = airo_get_avg_quality(&cap_rid);
range->avg_qual.level = 176; /* -80 dBm */
}
range->avg_qual.noise = 0;
range->avg_qual.updated = 0;
for(i = 0 ; i < 8 ; i++) {
range->bitrate[i] = cap_rid.supportedRates[i] * 500000;
if(range->bitrate[i] == 0)
@ -6508,15 +6559,6 @@ static int airo_get_range(struct net_device *dev,
range->max_retry = 65535;
range->min_r_time = 1024;
range->max_r_time = 65535 * 1024;
/* Experimental measurements - boundary 11/5.5 Mb/s */
/* Note : with or without the (local->rssi), results
* are somewhat different. - Jean II */
range->avg_qual.qual = airo_get_avg_quality(&cap_rid);
if (local->rssi)
range->avg_qual.level = 186; /* -70 dBm */
else
range->avg_qual.level = 176; /* -80 dBm */
range->avg_qual.noise = 0;
/* Event capability (kernel + driver) */
range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
@ -6676,12 +6718,18 @@ static int airo_get_aplist(struct net_device *dev,
loseSync = 0;
memcpy(address[i].sa_data, BSSList.bssid, ETH_ALEN);
address[i].sa_family = ARPHRD_ETHER;
if (local->rssi)
qual[i].level = 0x100 - local->rssi[BSSList.rssi].rssidBm;
else
qual[i].level = (BSSList.rssi + 321) / 2;
qual[i].qual = qual[i].noise = 0;
qual[i].updated = 2;
if (local->rssi) {
qual[i].level = 0x100 - BSSList.dBm;
qual[i].qual = airo_dbm_to_pct( local->rssi, BSSList.dBm );
qual[i].updated = IW_QUAL_QUAL_UPDATED;
} else {
qual[i].level = (BSSList.dBm + 321) / 2;
qual[i].qual = 0;
qual[i].updated = IW_QUAL_QUAL_INVALID;
}
qual[i].noise = local->wstats.qual.noise;
qual[i].updated = IW_QUAL_LEVEL_UPDATED
| IW_QUAL_NOISE_UPDATED;
if (BSSList.index == 0xffff)
break;
}
@ -6760,7 +6808,7 @@ static int airo_set_scan(struct net_device *dev,
static inline char *airo_translate_scan(struct net_device *dev,
char *current_ev,
char *end_buf,
BSSListRid *list)
BSSListRid *bss)
{
struct airo_info *ai = dev->priv;
struct iw_event iwe; /* Temporary buffer */
@ -6771,22 +6819,22 @@ static inline char *airo_translate_scan(struct net_device *dev,
/* First entry *MUST* be the AP MAC address */
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
memcpy(iwe.u.ap_addr.sa_data, list->bssid, ETH_ALEN);
memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN);
current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_ADDR_LEN);
/* Other entries will be displayed in the order we give them */
/* Add the ESSID */
iwe.u.data.length = list->ssidLen;
iwe.u.data.length = bss->ssidLen;
if(iwe.u.data.length > 32)
iwe.u.data.length = 32;
iwe.cmd = SIOCGIWESSID;
iwe.u.data.flags = 1;
current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, list->ssid);
current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, bss->ssid);
/* Add mode */
iwe.cmd = SIOCGIWMODE;
capabilities = le16_to_cpu(list->cap);
capabilities = le16_to_cpu(bss->cap);
if(capabilities & (CAP_ESS | CAP_IBSS)) {
if(capabilities & CAP_ESS)
iwe.u.mode = IW_MODE_MASTER;
@ -6797,19 +6845,25 @@ static inline char *airo_translate_scan(struct net_device *dev,
/* Add frequency */
iwe.cmd = SIOCGIWFREQ;
iwe.u.freq.m = le16_to_cpu(list->dsChannel);
iwe.u.freq.m = le16_to_cpu(bss->dsChannel);
iwe.u.freq.m = frequency_list[iwe.u.freq.m] * 100000;
iwe.u.freq.e = 1;
current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_FREQ_LEN);
/* Add quality statistics */
iwe.cmd = IWEVQUAL;
if (ai->rssi)
iwe.u.qual.level = 0x100 - ai->rssi[list->rssi].rssidBm;
else
iwe.u.qual.level = (list->rssi + 321) / 2;
iwe.u.qual.noise = 0;
iwe.u.qual.qual = 0;
if (ai->rssi) {
iwe.u.qual.level = 0x100 - bss->dBm;
iwe.u.qual.qual = airo_dbm_to_pct( ai->rssi, bss->dBm );
iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED;
} else {
iwe.u.qual.level = (bss->dBm + 321) / 2;
iwe.u.qual.qual = 0;
iwe.u.qual.updated = IW_QUAL_QUAL_INVALID;
}
iwe.u.qual.noise = ai->wstats.qual.noise;
iwe.u.qual.updated = IW_QUAL_LEVEL_UPDATED
| IW_QUAL_NOISE_UPDATED;
current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_QUAL_LEN);
/* Add encryption capability */
@ -6819,7 +6873,7 @@ static inline char *airo_translate_scan(struct net_device *dev,
else
iwe.u.data.flags = IW_ENCODE_DISABLED;
iwe.u.data.length = 0;
current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, list->ssid);
current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, bss->ssid);
/* Rate : stuffing multiple values in a single event require a bit
* more of magic - Jean II */
@ -6831,10 +6885,10 @@ static inline char *airo_translate_scan(struct net_device *dev,
/* Max 8 values */
for(i = 0 ; i < 8 ; i++) {
/* NULL terminated */
if(list->rates[i] == 0)
if(bss->rates[i] == 0)
break;
/* Bit rate given in 500 kb/s units (+ 0x80) */
iwe.u.bitrate.value = ((list->rates[i] & 0x7f) * 500000);
iwe.u.bitrate.value = ((bss->rates[i] & 0x7f) * 500000);
/* Add new value to event */
current_val = iwe_stream_add_value(current_ev, current_val, end_buf, &iwe, IW_EV_PARAM_LEN);
}
@ -7153,18 +7207,22 @@ static void airo_read_wireless_stats(struct airo_info *local)
/* The status */
local->wstats.status = status_rid.mode;
/* Signal quality and co. But where is the noise level ??? */
local->wstats.qual.qual = airo_get_quality(&status_rid, &cap_rid);
if (local->rssi)
local->wstats.qual.level = 0x100 - local->rssi[status_rid.sigQuality].rssidBm;
else
/* Signal quality and co */
if (local->rssi) {
local->wstats.qual.level = airo_rssi_to_dbm( local->rssi, status_rid.sigQuality );
/* normalizedSignalStrength appears to be a percentage */
local->wstats.qual.qual = status_rid.normalizedSignalStrength;
} else {
local->wstats.qual.level = (status_rid.normalizedSignalStrength + 321) / 2;
local->wstats.qual.qual = airo_get_quality(&status_rid, &cap_rid);
}
local->wstats.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED;
if (status_rid.len >= 124) {
local->wstats.qual.noise = 256 - status_rid.noisedBm;
local->wstats.qual.updated = 7;
local->wstats.qual.noise = 0x100 - status_rid.noisedBm;
local->wstats.qual.updated |= IW_QUAL_NOISE_UPDATED;
} else {
local->wstats.qual.noise = 0;
local->wstats.qual.updated = 3;
local->wstats.qual.updated |= IW_QUAL_NOISE_INVALID;
}
/* Packets discarded in the wireless adapter due to wireless

View file

@ -321,6 +321,7 @@ static struct {
{ 0x01bf, 0x3302, NULL, ATMEL_FW_TYPE_502E, "Belkin F5D6020-V2" },
{ 0, 0, "BT/Voyager 1020 Laptop Adapter", ATMEL_FW_TYPE_502, "BT Voyager 1020" },
{ 0, 0, "IEEE 802.11b/Wireless LAN PC Card", ATMEL_FW_TYPE_502, "Siemens Gigaset PC Card II" },
{ 0, 0, "IEEE 802.11b/Wireless LAN Card S", ATMEL_FW_TYPE_504_2958, "Siemens Gigaset PC Card II" },
{ 0, 0, "CNet/CNWLC 11Mbps Wireless PC Card V-5", ATMEL_FW_TYPE_502E, "CNet CNWLC-811ARL" },
{ 0, 0, "Wireless/PC_CARD", ATMEL_FW_TYPE_502D, "Planet WL-3552" },
{ 0, 0, "OEM/11Mbps Wireless LAN PC Card V-3", ATMEL_FW_TYPE_502, "OEM 11Mbps WLAN PCMCIA Card" },

View file

@ -9,6 +9,7 @@ obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o
obj-$(CONFIG_LCS) += lcs.o cu3088.o
qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o qeth_tso.o
obj-$(CONFIG_CLAW) += claw.o cu3088.o
qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o
qeth-$(CONFIG_PROC_FS) += qeth_proc.o
obj-$(CONFIG_QETH) += qeth.o

View file

@ -1,6 +1,6 @@
/*
*
* linux/drivers/s390/net/ctcdbug.h ($Revision: 1.4 $)
* linux/drivers/s390/net/ctcdbug.h ($Revision: 1.5 $)
*
* CTC / ESCON network driver - s390 dbf exploit.
*
@ -9,7 +9,7 @@
* Author(s): Original Code written by
* Peter Tiedemann (ptiedem@de.ibm.com)
*
* $Revision: 1.4 $ $Date: 2004/10/15 09:26:58 $
* $Revision: 1.5 $ $Date: 2005/02/27 19:46:44 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -25,9 +25,11 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _CTCDBUG_H_
#define _CTCDBUG_H_
#include <asm/debug.h>
#include "ctcmain.h"
/**
* Debug Facility stuff
*/
@ -41,7 +43,7 @@
#define CTC_DBF_DATA_LEN 128
#define CTC_DBF_DATA_INDEX 3
#define CTC_DBF_DATA_NR_AREAS 1
#define CTC_DBF_DATA_LEVEL 2
#define CTC_DBF_DATA_LEVEL 3
#define CTC_DBF_TRACE_NAME "ctc_trace"
#define CTC_DBF_TRACE_LEN 16
@ -121,3 +123,5 @@ hex_dump(unsigned char *buf, size_t len)
printk("\n");
}
#endif

View file

@ -1,5 +1,5 @@
/*
* $Id: ctcmain.c,v 1.72 2005/03/17 10:51:52 ptiedem Exp $
* $Id: ctcmain.c,v 1.74 2005/03/24 09:04:17 mschwide Exp $
*
* CTC / ESCON network driver
*
@ -37,12 +37,11 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* RELEASE-TAG: CTC/ESCON network driver $Revision: 1.72 $
* RELEASE-TAG: CTC/ESCON network driver $Revision: 1.74 $
*
*/
#undef DEBUG
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@ -74,288 +73,13 @@
#include "ctctty.h"
#include "fsm.h"
#include "cu3088.h"
#include "ctcdbug.h"
#include "ctcmain.h"
MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
MODULE_LICENSE("GPL");
/**
* CCW commands, used in this driver.
*/
#define CCW_CMD_WRITE 0x01
#define CCW_CMD_READ 0x02
#define CCW_CMD_SET_EXTENDED 0xc3
#define CCW_CMD_PREPARE 0xe3
#define CTC_PROTO_S390 0
#define CTC_PROTO_LINUX 1
#define CTC_PROTO_LINUX_TTY 2
#define CTC_PROTO_OS390 3
#define CTC_PROTO_MAX 3
#define CTC_BUFSIZE_LIMIT 65535
#define CTC_BUFSIZE_DEFAULT 32768
#define CTC_TIMEOUT_5SEC 5000
#define CTC_INITIAL_BLOCKLEN 2
#define READ 0
#define WRITE 1
#define CTC_ID_SIZE BUS_ID_SIZE+3
struct ctc_profile {
unsigned long maxmulti;
unsigned long maxcqueue;
unsigned long doios_single;
unsigned long doios_multi;
unsigned long txlen;
unsigned long tx_time;
struct timespec send_stamp;
};
/**
* Definition of one channel
*/
struct channel {
/**
* Pointer to next channel in list.
*/
struct channel *next;
char id[CTC_ID_SIZE];
struct ccw_device *cdev;
/**
* Type of this channel.
* CTC/A or Escon for valid channels.
*/
enum channel_types type;
/**
* Misc. flags. See CHANNEL_FLAGS_... below
*/
__u32 flags;
/**
* The protocol of this channel
*/
__u16 protocol;
/**
* I/O and irq related stuff
*/
struct ccw1 *ccw;
struct irb *irb;
/**
* RX/TX buffer size
*/
int max_bufsize;
/**
* Transmit/Receive buffer.
*/
struct sk_buff *trans_skb;
/**
* Universal I/O queue.
*/
struct sk_buff_head io_queue;
/**
* TX queue for collecting skb's during busy.
*/
struct sk_buff_head collect_queue;
/**
* Amount of data in collect_queue.
*/
int collect_len;
/**
* spinlock for collect_queue and collect_len
*/
spinlock_t collect_lock;
/**
 * Timer for detecting unresponsive
* I/O operations.
*/
fsm_timer timer;
/**
* Retry counter for misc. operations.
*/
int retry;
/**
* The finite state machine of this channel
*/
fsm_instance *fsm;
/**
* The corresponding net_device this channel
* belongs to.
*/
struct net_device *netdev;
struct ctc_profile prof;
unsigned char *trans_skb_data;
__u16 logflags;
};
#define CHANNEL_FLAGS_READ 0
#define CHANNEL_FLAGS_WRITE 1
#define CHANNEL_FLAGS_INUSE 2
#define CHANNEL_FLAGS_BUFSIZE_CHANGED 4
#define CHANNEL_FLAGS_FAILED 8
#define CHANNEL_FLAGS_WAITIRQ 16
#define CHANNEL_FLAGS_RWMASK 1
#define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK)
#define LOG_FLAG_ILLEGALPKT 1
#define LOG_FLAG_ILLEGALSIZE 2
#define LOG_FLAG_OVERRUN 4
#define LOG_FLAG_NOMEM 8
#define CTC_LOGLEVEL_INFO 1
#define CTC_LOGLEVEL_NOTICE 2
#define CTC_LOGLEVEL_WARN 4
#define CTC_LOGLEVEL_EMERG 8
#define CTC_LOGLEVEL_ERR 16
#define CTC_LOGLEVEL_DEBUG 32
#define CTC_LOGLEVEL_CRIT 64
#define CTC_LOGLEVEL_DEFAULT \
(CTC_LOGLEVEL_INFO | CTC_LOGLEVEL_NOTICE | CTC_LOGLEVEL_WARN | CTC_LOGLEVEL_CRIT)
#define CTC_LOGLEVEL_MAX ((CTC_LOGLEVEL_CRIT<<1)-1)
static int loglevel = CTC_LOGLEVEL_DEFAULT;
#define ctc_pr_debug(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_DEBUG) printk(KERN_DEBUG fmt,##arg); } while (0)
#define ctc_pr_info(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_INFO) printk(KERN_INFO fmt,##arg); } while (0)
#define ctc_pr_notice(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_NOTICE) printk(KERN_NOTICE fmt,##arg); } while (0)
#define ctc_pr_warn(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_WARN) printk(KERN_WARNING fmt,##arg); } while (0)
#define ctc_pr_emerg(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_EMERG) printk(KERN_EMERG fmt,##arg); } while (0)
#define ctc_pr_err(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_ERR) printk(KERN_ERR fmt,##arg); } while (0)
#define ctc_pr_crit(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_CRIT) printk(KERN_CRIT fmt,##arg); } while (0)
/**
* Linked list of all detected channels.
*/
static struct channel *channels = NULL;
struct ctc_priv {
struct net_device_stats stats;
unsigned long tbusy;
/**
* The finite state machine of this interface.
*/
fsm_instance *fsm;
/**
* The protocol of this device
*/
__u16 protocol;
/**
* Timer for restarting after I/O Errors
*/
fsm_timer restart_timer;
int buffer_size;
struct channel *channel[2];
};
/**
* Definition of our link level header.
*/
struct ll_header {
__u16 length;
__u16 type;
__u16 unused;
};
#define LL_HEADER_LENGTH (sizeof(struct ll_header))
/**
* Compatibility macros for busy handling
* of network devices.
*/
static __inline__ void
ctc_clear_busy(struct net_device * dev)
{
clear_bit(0, &(((struct ctc_priv *) dev->priv)->tbusy));
if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
netif_wake_queue(dev);
}
static __inline__ int
ctc_test_and_set_busy(struct net_device * dev)
{
if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
netif_stop_queue(dev);
return test_and_set_bit(0, &((struct ctc_priv *) dev->priv)->tbusy);
}
/**
* Print Banner.
*/
static void
print_banner(void)
{
static int printed = 0;
char vbuf[] = "$Revision: 1.72 $";
char *version = vbuf;
if (printed)
return;
if ((version = strchr(version, ':'))) {
char *p = strchr(version + 1, '$');
if (p)
*p = '\0';
} else
version = " ??? ";
printk(KERN_INFO "CTC driver Version%s"
#ifdef DEBUG
" (DEBUG-VERSION, " __DATE__ __TIME__ ")"
#endif
" initialized\n", version);
printed = 1;
}
/**
* Return type of a detected device.
*/
static enum channel_types
get_channel_type(struct ccw_device_id *id)
{
enum channel_types type = (enum channel_types) id->driver_info;
if (type == channel_type_ficon)
type = channel_type_escon;
return type;
}
/**
* States of the interface statemachine.
*/
@ -371,7 +95,7 @@ enum dev_states {
/**
* MUST be always the last element!!
*/
NR_DEV_STATES
CTC_NR_DEV_STATES
};
static const char *dev_state_names[] = {
@ -399,7 +123,7 @@ enum dev_events {
/**
* MUST be always the last element!!
*/
NR_DEV_EVENTS
CTC_NR_DEV_EVENTS
};
static const char *dev_event_names[] = {
@ -476,40 +200,6 @@ enum ch_events {
NR_CH_EVENTS,
};
static const char *ch_event_names[] = {
"ccw_device success",
"ccw_device busy",
"ccw_device enodev",
"ccw_device ioerr",
"ccw_device unknown",
"Status ATTN & BUSY",
"Status ATTN",
"Status BUSY",
"Unit check remote reset",
"Unit check remote system reset",
"Unit check TX timeout",
"Unit check TX parity",
"Unit check Hardware failure",
"Unit check RX parity",
"Unit check ZERO",
"Unit check Unknown",
"SubChannel check Unknown",
"Machine check failure",
"Machine check operational",
"IRQ normal",
"IRQ final",
"Timer",
"Start",
"Stop",
};
/**
* States of the channel statemachine.
*/
@ -545,6 +235,87 @@ enum ch_states {
NR_CH_STATES,
};
static int loglevel = CTC_LOGLEVEL_DEFAULT;
/**
* Linked list of all detected channels.
*/
static struct channel *channels = NULL;
/**
* Print Banner.
*/
static void
print_banner(void)
{
static int printed = 0;
char vbuf[] = "$Revision: 1.74 $";
char *version = vbuf;
if (printed)
return;
if ((version = strchr(version, ':'))) {
char *p = strchr(version + 1, '$');
if (p)
*p = '\0';
} else
version = " ??? ";
printk(KERN_INFO "CTC driver Version%s"
#ifdef DEBUG
" (DEBUG-VERSION, " __DATE__ __TIME__ ")"
#endif
" initialized\n", version);
printed = 1;
}
/**
* Return type of a detected device.
*/
static enum channel_types
get_channel_type(struct ccw_device_id *id)
{
enum channel_types type = (enum channel_types) id->driver_info;
if (type == channel_type_ficon)
type = channel_type_escon;
return type;
}
static const char *ch_event_names[] = {
"ccw_device success",
"ccw_device busy",
"ccw_device enodev",
"ccw_device ioerr",
"ccw_device unknown",
"Status ATTN & BUSY",
"Status ATTN",
"Status BUSY",
"Unit check remote reset",
"Unit check remote system reset",
"Unit check TX timeout",
"Unit check TX parity",
"Unit check Hardware failure",
"Unit check RX parity",
"Unit check ZERO",
"Unit check Unknown",
"SubChannel check Unknown",
"Machine check failure",
"Machine check operational",
"IRQ normal",
"IRQ final",
"Timer",
"Start",
"Stop",
};
static const char *ch_state_names[] = {
"Idle",
"Stopped",
@ -1934,7 +1705,6 @@ add_channel(struct ccw_device *cdev, enum channel_types type)
ch->cdev = cdev;
snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
ch->type = type;
loglevel = CTC_LOGLEVEL_DEFAULT;
ch->fsm = init_fsm(ch->id, ch_state_names,
ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
ch_fsm, CH_FSM_LEN, GFP_KERNEL);
@ -2697,6 +2467,7 @@ ctc_stats(struct net_device * dev)
/*
* sysfs attributes
*/
static ssize_t
buffer_show(struct device *dev, char *buf)
{
@ -2715,57 +2486,61 @@ buffer_write(struct device *dev, const char *buf, size_t count)
struct ctc_priv *priv;
struct net_device *ndev;
int bs1;
char buffer[16];
DBF_TEXT(trace, 3, __FUNCTION__);
DBF_TEXT(trace, 3, buf);
priv = dev->driver_data;
if (!priv)
if (!priv) {
DBF_TEXT(trace, 3, "bfnopriv");
return -ENODEV;
ndev = priv->channel[READ]->netdev;
if (!ndev)
return -ENODEV;
sscanf(buf, "%u", &bs1);
}
sscanf(buf, "%u", &bs1);
if (bs1 > CTC_BUFSIZE_LIMIT)
return -EINVAL;
goto einval;
if (bs1 < (576 + LL_HEADER_LENGTH + 2))
goto einval;
priv->buffer_size = bs1; // just to overwrite the default
ndev = priv->channel[READ]->netdev;
if (!ndev) {
DBF_TEXT(trace, 3, "bfnondev");
return -ENODEV;
}
if ((ndev->flags & IFF_RUNNING) &&
(bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
return -EINVAL;
if (bs1 < (576 + LL_HEADER_LENGTH + 2))
return -EINVAL;
goto einval;
priv->buffer_size = bs1;
priv->channel[READ]->max_bufsize =
priv->channel[WRITE]->max_bufsize = bs1;
priv->channel[READ]->max_bufsize = bs1;
priv->channel[WRITE]->max_bufsize = bs1;
if (!(ndev->flags & IFF_RUNNING))
ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
sprintf(buffer, "%d",priv->buffer_size);
DBF_TEXT(trace, 3, buffer);
return count;
einval:
DBF_TEXT(trace, 3, "buff_err");
return -EINVAL;
}
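/*
 * Note on the buffer size sanity checks above: LL_HEADER_LENGTH is
 * sizeof(struct ll_header) = 6 bytes, and the driver consistently accounts
 * for two further bytes of framing in front of each packet, so the smallest
 * accepted buffer is 576 + 6 + 2 = 584 bytes and the largest is
 * CTC_BUFSIZE_LIMIT (65535).  When the interface is not running, the MTU is
 * derived from the new buffer size; writing 32768 to the sysfs attribute,
 * for example, yields ndev->mtu = 32768 - 6 - 2 = 32760.
 */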
static ssize_t
loglevel_show(struct device *dev, char *buf)
{
struct ctc_priv *priv;
priv = dev->driver_data;
if (!priv)
return -ENODEV;
return sprintf(buf, "%d\n", loglevel);
}
static ssize_t
loglevel_write(struct device *dev, const char *buf, size_t count)
{
struct ctc_priv *priv;
int ll1;
DBF_TEXT(trace, 5, __FUNCTION__);
priv = dev->driver_data;
if (!priv)
return -ENODEV;
sscanf(buf, "%i", &ll1);
if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
@ -2835,27 +2610,6 @@ stats_write(struct device *dev, const char *buf, size_t count)
return count;
}
static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
static int
ctc_add_attributes(struct device *dev)
{
// device_create_file(dev, &dev_attr_buffer);
device_create_file(dev, &dev_attr_loglevel);
device_create_file(dev, &dev_attr_stats);
return 0;
}
static void
ctc_remove_attributes(struct device *dev)
{
device_remove_file(dev, &dev_attr_stats);
device_remove_file(dev, &dev_attr_loglevel);
// device_remove_file(dev, &dev_attr_buffer);
}
static void
ctc_netdev_unregister(struct net_device * dev)
@ -2899,52 +2653,6 @@ ctc_free_netdevice(struct net_device * dev, int free_dev)
#endif
}
/**
* Initialize everything of the net device except the name and the
* channel structs.
*/
static struct net_device *
ctc_init_netdevice(struct net_device * dev, int alloc_device,
struct ctc_priv *privptr)
{
if (!privptr)
return NULL;
DBF_TEXT(setup, 3, __FUNCTION__);
if (alloc_device) {
dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
if (!dev)
return NULL;
memset(dev, 0, sizeof (struct net_device));
}
dev->priv = privptr;
privptr->fsm = init_fsm("ctcdev", dev_state_names,
dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
if (privptr->fsm == NULL) {
if (alloc_device)
kfree(dev);
return NULL;
}
fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
fsm_settimer(privptr->fsm, &privptr->restart_timer);
if (dev->mtu == 0)
dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
dev->hard_start_xmit = ctc_tx;
dev->open = ctc_open;
dev->stop = ctc_close;
dev->get_stats = ctc_stats;
dev->change_mtu = ctc_change_mtu;
dev->hard_header_len = LL_HEADER_LENGTH + 2;
dev->addr_len = 0;
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = 100;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
SET_MODULE_OWNER(dev);
return dev;
}
static ssize_t
ctc_proto_show(struct device *dev, char *buf)
{
@ -2977,7 +2685,6 @@ ctc_proto_store(struct device *dev, const char *buf, size_t count)
return count;
}
static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
static ssize_t
ctc_type_show(struct device *dev, char *buf)
@ -2991,8 +2698,13 @@ ctc_type_show(struct device *dev, char *buf)
return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
}
static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);
static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
static struct attribute *ctc_attr[] = {
&dev_attr_protocol.attr,
&dev_attr_type.attr,
@ -3004,6 +2716,21 @@ static struct attribute_group ctc_attr_group = {
.attrs = ctc_attr,
};
static int
ctc_add_attributes(struct device *dev)
{
device_create_file(dev, &dev_attr_loglevel);
device_create_file(dev, &dev_attr_stats);
return 0;
}
static void
ctc_remove_attributes(struct device *dev)
{
device_remove_file(dev, &dev_attr_stats);
device_remove_file(dev, &dev_attr_loglevel);
}
static int
ctc_add_files(struct device *dev)
{
@ -3028,15 +2755,15 @@ ctc_remove_files(struct device *dev)
*
* @returns 0 on success, !0 on failure.
*/
static int
ctc_probe_device(struct ccwgroup_device *cgdev)
{
struct ctc_priv *priv;
int rc;
char buffer[16];
pr_debug("%s() called\n", __FUNCTION__);
DBF_TEXT(trace, 3, __FUNCTION__);
DBF_TEXT(setup, 3, __FUNCTION__);
if (!get_device(&cgdev->dev))
return -ENODEV;
@ -3060,9 +2787,69 @@ ctc_probe_device(struct ccwgroup_device *cgdev)
cgdev->cdev[1]->handler = ctc_irq_handler;
cgdev->dev.driver_data = priv;
sprintf(buffer, "%p", priv);
DBF_TEXT(data, 3, buffer);
sprintf(buffer, "%u", (unsigned int)sizeof(struct ctc_priv));
DBF_TEXT(data, 3, buffer);
sprintf(buffer, "%p", &channels);
DBF_TEXT(data, 3, buffer);
sprintf(buffer, "%u", (unsigned int)sizeof(struct channel));
DBF_TEXT(data, 3, buffer);
return 0;
}
/**
* Initialize everything of the net device except the name and the
* channel structs.
*/
static struct net_device *
ctc_init_netdevice(struct net_device * dev, int alloc_device,
struct ctc_priv *privptr)
{
if (!privptr)
return NULL;
DBF_TEXT(setup, 3, __FUNCTION__);
if (alloc_device) {
dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
if (!dev)
return NULL;
memset(dev, 0, sizeof (struct net_device));
}
dev->priv = privptr;
privptr->fsm = init_fsm("ctcdev", dev_state_names,
dev_event_names, CTC_NR_DEV_STATES, CTC_NR_DEV_EVENTS,
dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
if (privptr->fsm == NULL) {
if (alloc_device)
kfree(dev);
return NULL;
}
fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
fsm_settimer(privptr->fsm, &privptr->restart_timer);
if (dev->mtu == 0)
dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
dev->hard_start_xmit = ctc_tx;
dev->open = ctc_open;
dev->stop = ctc_close;
dev->get_stats = ctc_stats;
dev->change_mtu = ctc_change_mtu;
dev->hard_header_len = LL_HEADER_LENGTH + 2;
dev->addr_len = 0;
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = 100;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
SET_MODULE_OWNER(dev);
return dev;
}
/**
*
* Setup an interface.
@ -3081,6 +2868,7 @@ ctc_new_device(struct ccwgroup_device *cgdev)
struct ctc_priv *privptr;
struct net_device *dev;
int ret;
char buffer[16];
pr_debug("%s() called\n", __FUNCTION__);
DBF_TEXT(setup, 3, __FUNCTION__);
@ -3089,6 +2877,9 @@ ctc_new_device(struct ccwgroup_device *cgdev)
if (!privptr)
return -ENODEV;
sprintf(buffer, "%d", privptr->buffer_size);
DBF_TEXT(setup, 3, buffer);
type = get_channel_type(&cgdev->cdev[0]->id);
snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
@ -3177,9 +2968,10 @@ ctc_shutdown_device(struct ccwgroup_device *cgdev)
struct ctc_priv *priv;
struct net_device *ndev;
DBF_TEXT(trace, 3, __FUNCTION__);
DBF_TEXT(setup, 3, __FUNCTION__);
pr_debug("%s() called\n", __FUNCTION__);
priv = cgdev->dev.driver_data;
ndev = NULL;
if (!priv)
@ -3215,7 +3007,6 @@ ctc_shutdown_device(struct ccwgroup_device *cgdev)
channel_remove(priv->channel[READ]);
if (priv->channel[WRITE])
channel_remove(priv->channel[WRITE]);
priv->channel[READ] = priv->channel[WRITE] = NULL;
return 0;
@ -3228,7 +3019,7 @@ ctc_remove_device(struct ccwgroup_device *cgdev)
struct ctc_priv *priv;
pr_debug("%s() called\n", __FUNCTION__);
DBF_TEXT(trace, 3, __FUNCTION__);
DBF_TEXT(setup, 3, __FUNCTION__);
priv = cgdev->dev.driver_data;
if (!priv)
@ -3265,6 +3056,7 @@ static struct ccwgroup_driver ctc_group_driver = {
static void __exit
ctc_exit(void)
{
DBF_TEXT(setup, 3, __FUNCTION__);
unregister_cu3088_discipline(&ctc_group_driver);
ctc_tty_cleanup();
ctc_unregister_dbf_views();
@ -3282,6 +3074,10 @@ ctc_init(void)
{
int ret = 0;
loglevel = CTC_LOGLEVEL_DEFAULT;
DBF_TEXT(setup, 3, __FUNCTION__);
print_banner();
ret = ctc_register_dbf_views();

276
drivers/s390/net/ctcmain.h Normal file
View file

@ -0,0 +1,276 @@
/*
* $Id: ctcmain.h,v 1.4 2005/03/24 09:04:17 mschwide Exp $
*
* CTC / ESCON network driver
*
* Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
*             Peter Tiedemann (ptiedem@de.ibm.com)
*
*
* Documentation used:
* - Principles of Operation (IBM doc#: SA22-7201-06)
* - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
* - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
* - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
* - ESCON I/O Interface (IBM doc#: SA22-7202-029)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* RELEASE-TAG: CTC/ESCON network driver $Revision: 1.4 $
*
*/
#ifndef _CTCMAIN_H_
#define _CTCMAIN_H_
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include "ctctty.h"
#include "fsm.h"
#include "cu3088.h"
/**
* CCW commands, used in this driver.
*/
#define CCW_CMD_WRITE 0x01
#define CCW_CMD_READ 0x02
#define CCW_CMD_SET_EXTENDED 0xc3
#define CCW_CMD_PREPARE 0xe3
#define CTC_PROTO_S390 0
#define CTC_PROTO_LINUX 1
#define CTC_PROTO_LINUX_TTY 2
#define CTC_PROTO_OS390 3
#define CTC_PROTO_MAX 3
#define CTC_BUFSIZE_LIMIT 65535
#define CTC_BUFSIZE_DEFAULT 32768
#define CTC_TIMEOUT_5SEC 5000
#define CTC_INITIAL_BLOCKLEN 2
#define READ 0
#define WRITE 1
#define CTC_ID_SIZE BUS_ID_SIZE+3
struct ctc_profile {
unsigned long maxmulti;
unsigned long maxcqueue;
unsigned long doios_single;
unsigned long doios_multi;
unsigned long txlen;
unsigned long tx_time;
struct timespec send_stamp;
};
/**
* Definition of one channel
*/
struct channel {
/**
* Pointer to next channel in list.
*/
struct channel *next;
char id[CTC_ID_SIZE];
struct ccw_device *cdev;
/**
* Type of this channel.
* CTC/A or Escon for valid channels.
*/
enum channel_types type;
/**
* Misc. flags. See CHANNEL_FLAGS_... below
*/
__u32 flags;
/**
* The protocol of this channel
*/
__u16 protocol;
/**
* I/O and irq related stuff
*/
struct ccw1 *ccw;
struct irb *irb;
/**
* RX/TX buffer size
*/
int max_bufsize;
/**
* Transmit/Receive buffer.
*/
struct sk_buff *trans_skb;
/**
* Universal I/O queue.
*/
struct sk_buff_head io_queue;
/**
* TX queue for collecting skb's during busy.
*/
struct sk_buff_head collect_queue;
/**
* Amount of data in collect_queue.
*/
int collect_len;
/**
* spinlock for collect_queue and collect_len
*/
spinlock_t collect_lock;
/**
* Timer for detecting unresponsive
* I/O operations.
*/
fsm_timer timer;
/**
* Retry counter for misc. operations.
*/
int retry;
/**
* The finite state machine of this channel
*/
fsm_instance *fsm;
/**
* The corresponding net_device this channel
* belongs to.
*/
struct net_device *netdev;
struct ctc_profile prof;
unsigned char *trans_skb_data;
__u16 logflags;
};
#define CHANNEL_FLAGS_READ 0
#define CHANNEL_FLAGS_WRITE 1
#define CHANNEL_FLAGS_INUSE 2
#define CHANNEL_FLAGS_BUFSIZE_CHANGED 4
#define CHANNEL_FLAGS_FAILED 8
#define CHANNEL_FLAGS_WAITIRQ 16
#define CHANNEL_FLAGS_RWMASK 1
#define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK)
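/*
 * Note on the channel flags above: bit 0 carries the transfer direction
 * (CHANNEL_FLAGS_READ = 0, CHANNEL_FLAGS_WRITE = 1), which is why
 * CHANNEL_FLAGS_RWMASK is 1 and CHANNEL_DIRECTION() simply masks that bit
 * out.  The remaining flags are independent state bits.  For example, a
 * write channel that is in use and waiting for an interrupt carries
 * flags = CHANNEL_FLAGS_WRITE | CHANNEL_FLAGS_INUSE | CHANNEL_FLAGS_WAITIRQ
 *       = 1 | 2 | 16 = 19, and CHANNEL_DIRECTION(19) == WRITE.
 */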
#define LOG_FLAG_ILLEGALPKT 1
#define LOG_FLAG_ILLEGALSIZE 2
#define LOG_FLAG_OVERRUN 4
#define LOG_FLAG_NOMEM 8
#define CTC_LOGLEVEL_INFO 1
#define CTC_LOGLEVEL_NOTICE 2
#define CTC_LOGLEVEL_WARN 4
#define CTC_LOGLEVEL_EMERG 8
#define CTC_LOGLEVEL_ERR 16
#define CTC_LOGLEVEL_DEBUG 32
#define CTC_LOGLEVEL_CRIT 64
#define CTC_LOGLEVEL_DEFAULT \
(CTC_LOGLEVEL_INFO | CTC_LOGLEVEL_NOTICE | CTC_LOGLEVEL_WARN | CTC_LOGLEVEL_CRIT)
#define CTC_LOGLEVEL_MAX ((CTC_LOGLEVEL_CRIT<<1)-1)
#define ctc_pr_debug(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_DEBUG) printk(KERN_DEBUG fmt,##arg); } while (0)
#define ctc_pr_info(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_INFO) printk(KERN_INFO fmt,##arg); } while (0)
#define ctc_pr_notice(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_NOTICE) printk(KERN_NOTICE fmt,##arg); } while (0)
#define ctc_pr_warn(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_WARN) printk(KERN_WARNING fmt,##arg); } while (0)
#define ctc_pr_emerg(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_EMERG) printk(KERN_EMERG fmt,##arg); } while (0)
#define ctc_pr_err(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_ERR) printk(KERN_ERR fmt,##arg); } while (0)
#define ctc_pr_crit(fmt, arg...) \
do { if (loglevel & CTC_LOGLEVEL_CRIT) printk(KERN_CRIT fmt,##arg); } while (0)
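/*
 * Note on the loglevel scheme above: each CTC_LOGLEVEL_* value is a distinct
 * bit, so a loglevel setting is a bitmask of enabled message classes.
 * CTC_LOGLEVEL_DEFAULT = INFO | NOTICE | WARN | CRIT = 1 | 2 | 4 | 64 = 71,
 * and CTC_LOGLEVEL_MAX = (CRIT << 1) - 1 = 127, i.e. all seven bits set.
 * For illustration (values only, the macros expand to the printk calls
 * shown above), enabling debug output on top of the default would be:
 *
 *	loglevel = CTC_LOGLEVEL_DEFAULT | CTC_LOGLEVEL_DEBUG;	(71 | 32 = 103)
 *	ctc_pr_debug("ch-0.0.f000: %s\n", "now visible");
 */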
struct ctc_priv {
struct net_device_stats stats;
unsigned long tbusy;
/**
* The finite state machine of this interface.
*/
fsm_instance *fsm;
/**
* The protocol of this device
*/
__u16 protocol;
/**
* Timer for restarting after I/O Errors
*/
fsm_timer restart_timer;
int buffer_size;
struct channel *channel[2];
};
/**
* Definition of our link level header.
*/
struct ll_header {
__u16 length;
__u16 type;
__u16 unused;
};
#define LL_HEADER_LENGTH (sizeof(struct ll_header))
/**
* Compatibility macros for busy handling
* of network devices.
*/
static __inline__ void
ctc_clear_busy(struct net_device * dev)
{
clear_bit(0, &(((struct ctc_priv *) dev->priv)->tbusy));
if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
netif_wake_queue(dev);
}
static __inline__ int
ctc_test_and_set_busy(struct net_device * dev)
{
if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
netif_stop_queue(dev);
return test_and_set_bit(0, &((struct ctc_priv *) dev->priv)->tbusy);
}
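/*
 * Rough usage sketch for the busy helpers above (illustrative only, matching
 * how a hard_start_xmit handler is expected to use them):
 *
 *	if (ctc_test_and_set_busy(dev))
 *		return NETDEV_TX_BUSY;
 *	... queue the skb on the write channel ...
 *	ctc_clear_busy(dev);
 *	return NETDEV_TX_OK;
 *
 * For CTC_PROTO_LINUX_TTY the netif queue calls are skipped because the TTY
 * protocol has no network queue to stop or wake.
 */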
#endif

View file

@ -1,5 +1,5 @@
/*
* $Id: ctctty.c,v 1.26 2004/08/04 11:06:55 mschwide Exp $
* $Id: ctctty.c,v 1.29 2005/04/05 08:50:44 mschwide Exp $
*
* CTC / ESCON network driver, tty interface.
*
@ -1056,8 +1056,7 @@ ctc_tty_close(struct tty_struct *tty, struct file *filp)
info->tty = 0;
tty->closing = 0;
if (info->blocked_open) {
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ/2);
msleep_interruptible(500);
wake_up_interruptible(&info->open_wait);
}
info->flags &= ~(CTC_ASYNC_NORMAL_ACTIVE | CTC_ASYNC_CLOSING);

View file

@ -1,5 +1,5 @@
/*
* $Id: cu3088.c,v 1.34 2004/06/15 13:16:27 pavlic Exp $
* $Id: cu3088.c,v 1.35 2005/03/30 19:28:52 richtera Exp $
*
* CTC / LCS ccw_device driver
*
@ -39,6 +39,7 @@ const char *cu3088_type[] = {
"FICON channel",
"P390 LCS card",
"OSA LCS card",
"CLAW channel device",
"unknown channel type",
"unsupported channel type",
};
@ -51,6 +52,7 @@ static struct ccw_device_id cu3088_ids[] = {
{ CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon },
{ CCW_DEVICE(0x3088, 0x01), .driver_info = channel_type_p390 },
{ CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 },
{ CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw },
{ /* end of list */ }
};

View file

@ -23,6 +23,9 @@ enum channel_types {
/* Device is a OSA2 card */
channel_type_osa2,
/* Device is a CLAW channel device */
channel_type_claw,
/* Device is a channel, but we don't know
* anything about it */
channel_type_unknown,

View file

@ -1,5 +1,5 @@
/*
* $Id: iucv.c,v 1.43 2005/02/09 14:47:43 braunu Exp $
* $Id: iucv.c,v 1.45 2005/04/26 22:59:06 braunu Exp $
*
* IUCV network driver
*
@ -29,7 +29,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* RELEASE-TAG: IUCV lowlevel driver $Revision: 1.43 $
* RELEASE-TAG: IUCV lowlevel driver $Revision: 1.45 $
*
*/
@ -355,7 +355,7 @@ do { \
static void
iucv_banner(void)
{
char vbuf[] = "$Revision: 1.43 $";
char vbuf[] = "$Revision: 1.45 $";
char *version = vbuf;
if ((version = strchr(version, ':'))) {
@ -2553,12 +2553,12 @@ EXPORT_SYMBOL (iucv_resume);
#endif
EXPORT_SYMBOL (iucv_reply_prmmsg);
EXPORT_SYMBOL (iucv_send);
#if 0
EXPORT_SYMBOL (iucv_send2way);
EXPORT_SYMBOL (iucv_send2way_array);
EXPORT_SYMBOL (iucv_send_array);
EXPORT_SYMBOL (iucv_send2way_prmmsg);
EXPORT_SYMBOL (iucv_send2way_prmmsg_array);
#if 0
EXPORT_SYMBOL (iucv_send_array);
EXPORT_SYMBOL (iucv_send_prmmsg);
EXPORT_SYMBOL (iucv_setmask);
#endif

View file

@ -11,7 +11,7 @@
* Frank Pavlic (pavlic@de.ibm.com) and
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*
* $Revision: 1.96 $ $Date: 2004/11/11 13:42:33 $
* $Revision: 1.98 $ $Date: 2005/04/18 13:41:29 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -59,7 +59,7 @@
/**
* initialization string for output
*/
#define VERSION_LCS_C "$Revision: 1.96 $"
#define VERSION_LCS_C "$Revision: 1.98 $"
static char version[] __initdata = "LCS driver ("VERSION_LCS_C "/" VERSION_LCS_H ")";
static char debug_buffer[255];
@ -1098,14 +1098,6 @@ lcs_check_multicast_support(struct lcs_card *card)
PRINT_ERR("Query IPAssist failed. Assuming unsupported!\n");
return -EOPNOTSUPP;
}
/* Print out supported assists: IPv6 */
PRINT_INFO("LCS device %s %s IPv6 support\n", card->dev->name,
(card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
"with" : "without");
/* Print out supported assist: Multicast */
PRINT_INFO("LCS device %s %s Multicast support\n", card->dev->name,
(card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
"with" : "without");
if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT)
return 0;
return -EOPNOTSUPP;
@ -1160,7 +1152,7 @@ list_modified:
}
}
/* re-insert all entries from the failed_list into ipm_list */
list_for_each_entry(ipm, &failed_list, list) {
list_for_each_entry_safe(ipm, tmp, &failed_list, list) {
list_del_init(&ipm->list);
list_add_tail(&ipm->list, &card->ipm_list);
}
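/*
 * The switch to list_for_each_entry_safe() above matters because the loop
 * body moves each entry to another list: once list_del_init() and
 * list_add_tail() have run, the entry's ->next pointer belongs to
 * card->ipm_list, so a plain list_for_each_entry() would wander off the
 * failed_list.  The _safe variant caches the successor before the body runs.
 * Generic pattern (type and list names illustrative):
 *
 *	struct foo *pos, *tmp;
 *	list_for_each_entry_safe(pos, tmp, &src_list, list) {
 *		list_del_init(&pos->list);
 *		list_add_tail(&pos->list, &dst_list);
 *	}
 */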
@ -2198,30 +2190,39 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
if (!dev)
goto out;
card->dev = dev;
netdev_out:
card->dev->priv = card;
card->dev->open = lcs_open_device;
card->dev->stop = lcs_stop_device;
card->dev->hard_start_xmit = lcs_start_xmit;
card->dev->get_stats = lcs_getstats;
SET_MODULE_OWNER(dev);
if (lcs_register_netdev(ccwgdev) != 0)
goto out;
memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
#ifdef CONFIG_IP_MULTICAST
if (!lcs_check_multicast_support(card))
card->dev->set_multicast_list = lcs_set_multicast_list;
#endif
netif_stop_queue(card->dev);
netdev_out:
lcs_set_allowed_threads(card,0xffffffff);
if (recover_state == DEV_STATE_RECOVER) {
lcs_set_multicast_list(card->dev);
card->dev->flags |= IFF_UP;
netif_wake_queue(card->dev);
card->state = DEV_STATE_UP;
} else
} else {
lcs_stopcard(card);
}
if (lcs_register_netdev(ccwgdev) != 0)
goto out;
/* Print out supported assists: IPv6 */
PRINT_INFO("LCS device %s %s IPv6 support\n", card->dev->name,
(card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
"with" : "without");
/* Print out supported assist: Multicast */
PRINT_INFO("LCS device %s %s Multicast support\n", card->dev->name,
(card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
"with" : "without");
return 0;
out:

View file

@ -24,7 +24,7 @@
#include "qeth_mpc.h"
#define VERSION_QETH_H "$Revision: 1.135 $"
#define VERSION_QETH_H "$Revision: 1.139 $"
#ifdef CONFIG_QETH_IPV6
#define QETH_VERSION_IPV6 ":IPv6"
@ -288,7 +288,8 @@ qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func)
#define QETH_TX_TIMEOUT 100 * HZ
#define QETH_HEADER_SIZE 32
#define MAX_PORTNO 15
#define QETH_FAKE_LL_LEN ETH_HLEN
#define QETH_FAKE_LL_LEN_ETH ETH_HLEN
#define QETH_FAKE_LL_LEN_TR (sizeof(struct trh_hdr)-TR_MAXRIFLEN+sizeof(struct trllc))
#define QETH_FAKE_LL_V6_ADDR_POS 24
/*IPv6 address autoconfiguration stuff*/
@ -369,6 +370,25 @@ struct qeth_hdr {
} hdr;
} __attribute__ ((packed));
/*TCP Segmentation Offload header*/
struct qeth_hdr_ext_tso {
__u16 hdr_tot_len;
__u8 imb_hdr_no;
__u8 reserved;
__u8 hdr_type;
__u8 hdr_version;
__u16 hdr_len;
__u32 payload_len;
__u16 mss;
__u16 dg_hdr_len;
__u8 padding[16];
} __attribute__ ((packed));
struct qeth_hdr_tso {
struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/
struct qeth_hdr_ext_tso ext;
} __attribute__ ((packed));
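/*
 * Size note on the TSO header above: the extension block adds up to
 * 2+1+1+1+1+2+4+2+2+16 = 32 bytes, so together with the regular 32-byte
 * struct qeth_hdr (cf. QETH_HEADER_SIZE) a struct qeth_hdr_tso occupies
 * 64 bytes of headroom.  This is why qeth_get_hlen() below now reserves
 * sizeof(struct qeth_hdr_tso) instead of sizeof(struct qeth_hdr).
 */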
/* flags for qeth_hdr.flags */
#define QETH_HDR_PASSTHRU 0x10
@ -866,6 +886,7 @@ qeth_push_skb(struct qeth_card *card, struct sk_buff **skb, int size)
return hdr;
}
inline static int
qeth_get_hlen(__u8 link_type)
{
@ -873,19 +894,19 @@ qeth_get_hlen(__u8 link_type)
switch (link_type) {
case QETH_LINK_TYPE_HSTR:
case QETH_LINK_TYPE_LANE_TR:
return sizeof(struct qeth_hdr) + TR_HLEN;
return sizeof(struct qeth_hdr_tso) + TR_HLEN;
default:
#ifdef CONFIG_QETH_VLAN
return sizeof(struct qeth_hdr) + VLAN_ETH_HLEN;
return sizeof(struct qeth_hdr_tso) + VLAN_ETH_HLEN;
#else
return sizeof(struct qeth_hdr) + ETH_HLEN;
return sizeof(struct qeth_hdr_tso) + ETH_HLEN;
#endif
}
#else /* CONFIG_QETH_IPV6 */
#ifdef CONFIG_QETH_VLAN
return sizeof(struct qeth_hdr) + VLAN_HLEN;
return sizeof(struct qeth_hdr_tso) + VLAN_HLEN;
#else
return sizeof(struct qeth_hdr);
return sizeof(struct qeth_hdr_tso);
#endif
#endif /* CONFIG_QETH_IPV6 */
}

View file

@ -1,6 +1,6 @@
/*
*
* linux/drivers/s390/net/qeth_eddp.c ($Revision: 1.11 $)
* linux/drivers/s390/net/qeth_eddp.c ($Revision: 1.13 $)
*
* Enhanced Device Driver Packing (EDDP) support for the qeth driver.
*
@ -8,7 +8,7 @@
*
* Author(s): Thomas Spatzier <tspat@de.ibm.com>
*
* $Revision: 1.11 $ $Date: 2005/03/24 09:04:18 $
* $Revision: 1.13 $ $Date: 2005/05/04 20:19:18 $
*
*/
#include <linux/config.h>
@ -85,7 +85,7 @@ void
qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
struct qeth_eddp_context_reference *ref;
QETH_DBF_TEXT(trace, 6, "eddprctx");
while (!list_empty(&buf->ctx_list)){
ref = list_entry(buf->ctx_list.next,
@ -139,7 +139,7 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
"buffer!\n");
goto out;
}
}
}
/* check if the whole next skb fits into current buffer */
if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
buf->next_element_to_fill)
@ -152,7 +152,7 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
* and increment ctx's refcnt */
must_refcnt = 1;
continue;
}
}
if (must_refcnt){
must_refcnt = 0;
if (qeth_eddp_buf_ref_context(buf, ctx)){
@ -202,40 +202,29 @@ out:
return flush_cnt;
}
static inline int
qeth_get_skb_data_len(struct sk_buff *skb)
{
int len = skb->len;
int i;
for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i)
len -= skb_shinfo(skb)->frags[i].size;
return len;
}
static inline void
qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
struct qeth_eddp_data *eddp)
struct qeth_eddp_data *eddp, int data_len)
{
u8 *page;
int page_remainder;
int page_offset;
int hdr_len;
int pkt_len;
struct qeth_eddp_element *element;
QETH_DBF_TEXT(trace, 5, "eddpcrsh");
page = ctx->pages[ctx->offset >> PAGE_SHIFT];
page_offset = ctx->offset % PAGE_SIZE;
element = &ctx->elements[ctx->num_elements];
hdr_len = eddp->nhl + eddp->thl;
pkt_len = eddp->nhl + eddp->thl + data_len;
/* FIXME: layer2 and VLAN !!! */
if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
hdr_len += ETH_HLEN;
pkt_len += ETH_HLEN;
if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
hdr_len += VLAN_HLEN;
/* does complete header fit in current page ? */
pkt_len += VLAN_HLEN;
/* does complete packet fit in current page ? */
page_remainder = PAGE_SIZE - page_offset;
if (page_remainder < (sizeof(struct qeth_hdr) + hdr_len)){
if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)){
/* no -> go to start of next page */
ctx->offset += page_remainder;
page = ctx->pages[ctx->offset >> PAGE_SHIFT];
@ -281,7 +270,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
int left_in_frag;
int copy_len;
u8 *src;
QETH_DBF_TEXT(trace, 5, "eddpcdtc");
if (skb_shinfo(eddp->skb)->nr_frags == 0) {
memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
@ -292,7 +281,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
while (len > 0) {
if (eddp->frag < 0) {
/* we're in skb->data */
left_in_frag = qeth_get_skb_data_len(eddp->skb)
left_in_frag = (eddp->skb->len - eddp->skb->data_len)
- eddp->skb_offset;
src = eddp->skb->data + eddp->skb_offset;
} else {
@ -424,7 +413,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
struct tcphdr *tcph;
int data_len;
u32 hcsum;
QETH_DBF_TEXT(trace, 5, "eddpftcp");
eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
tcph = eddp->skb->h.th;
@ -464,7 +453,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
else
hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
/* fill the next segment into the context */
qeth_eddp_create_segment_hdrs(ctx, eddp);
qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
if (eddp->skb_offset >= eddp->skb->len)
break;
@ -474,13 +463,13 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
eddp->th.tcp.h.seq += data_len;
}
}
static inline int
qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
struct sk_buff *skb, struct qeth_hdr *qhdr)
{
struct qeth_eddp_data *eddp = NULL;
QETH_DBF_TEXT(trace, 5, "eddpficx");
/* create our segmentation headers and copy original headers */
if (skb->protocol == ETH_P_IP)
@ -520,7 +509,7 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
int hdr_len)
{
int skbs_per_page;
QETH_DBF_TEXT(trace, 5, "eddpcanp");
/* can we put multiple skbs in one page? */
skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
@ -600,7 +589,7 @@ qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *qhdr)
{
struct qeth_eddp_context *ctx = NULL;
QETH_DBF_TEXT(trace, 5, "creddpct");
if (skb->protocol == ETH_P_IP)
ctx = qeth_eddp_create_context_generic(card, skb,

View file

@ -1,6 +1,6 @@
/*
*
* linux/drivers/s390/net/qeth_main.c ($Revision: 1.206 $)
* linux/drivers/s390/net/qeth_main.c ($Revision: 1.214 $)
*
* Linux on zSeries OSA Express and HiperSockets support
*
@ -12,7 +12,7 @@
* Frank Pavlic (pavlic@de.ibm.com) and
* Thomas Spatzier <tspat@de.ibm.com>
*
* $Revision: 1.206 $ $Date: 2005/03/24 09:04:18 $
* $Revision: 1.214 $ $Date: 2005/05/04 20:19:18 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -80,7 +80,7 @@ qeth_eyecatcher(void)
#include "qeth_eddp.h"
#include "qeth_tso.h"
#define VERSION_QETH_C "$Revision: 1.206 $"
#define VERSION_QETH_C "$Revision: 1.214 $"
static const char *version = "qeth S/390 OSA-Express driver";
/**
@ -158,6 +158,9 @@ qeth_irq_tasklet(unsigned long);
static int
qeth_set_online(struct ccwgroup_device *);
static int
__qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode);
static struct qeth_ipaddr *
qeth_get_addr_buffer(enum qeth_prot_versions);
@ -510,10 +513,10 @@ qeth_irq_tasklet(unsigned long data)
wake_up(&card->wait_q);
}
static int qeth_stop_card(struct qeth_card *);
static int qeth_stop_card(struct qeth_card *, int);
static int
qeth_set_offline(struct ccwgroup_device *cgdev)
__qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
{
struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
int rc = 0;
@ -523,7 +526,7 @@ qeth_set_offline(struct ccwgroup_device *cgdev)
QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
recover_flag = card->state;
if (qeth_stop_card(card) == -ERESTARTSYS){
if (qeth_stop_card(card, recovery_mode) == -ERESTARTSYS){
PRINT_WARN("Stopping card %s interrupted by user!\n",
CARD_BUS_ID(card));
return -ERESTARTSYS;
@ -539,6 +542,12 @@ qeth_set_offline(struct ccwgroup_device *cgdev)
return 0;
}
static int
qeth_set_offline(struct ccwgroup_device *cgdev)
{
return __qeth_set_offline(cgdev, 0);
}
static int
qeth_wait_for_threads(struct qeth_card *card, unsigned long threads);
@ -953,8 +962,8 @@ qeth_recover(void *ptr)
PRINT_WARN("Recovery of device %s started ...\n",
CARD_BUS_ID(card));
card->use_hard_stop = 1;
qeth_set_offline(card->gdev);
rc = qeth_set_online(card->gdev);
__qeth_set_offline(card->gdev,1);
rc = __qeth_set_online(card->gdev,1);
if (!rc)
PRINT_INFO("Device %s successfully recovered!\n",
CARD_BUS_ID(card));
@ -2152,9 +2161,15 @@ qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
if (!skb_len)
return NULL;
if (card->options.fake_ll){
if (!(skb = qeth_get_skb(skb_len + QETH_FAKE_LL_LEN)))
goto no_mem;
skb_pull(skb, QETH_FAKE_LL_LEN);
if(card->dev->type == ARPHRD_IEEE802_TR){
if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_TR)))
goto no_mem;
skb_reserve(skb,QETH_FAKE_LL_LEN_TR);
} else {
if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_ETH)))
goto no_mem;
skb_reserve(skb,QETH_FAKE_LL_LEN_ETH);
}
} else if (!(skb = qeth_get_skb(skb_len)))
goto no_mem;
data_ptr = element->addr + offset;
@ -2229,14 +2244,68 @@ qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
}
static inline void
qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *hdr)
{
struct trh_hdr *fake_hdr;
struct trllc *fake_llc;
struct iphdr *ip_hdr;
QETH_DBF_TEXT(trace,5,"skbfktr");
skb->mac.raw = skb->data - QETH_FAKE_LL_LEN_TR;
/* this is a fake ethernet header */
fake_hdr = (struct trh_hdr *) skb->mac.raw;
/* the destination MAC address */
switch (skb->pkt_type){
case PACKET_MULTICAST:
switch (skb->protocol){
#ifdef CONFIG_QETH_IPV6
case __constant_htons(ETH_P_IPV6):
ndisc_mc_map((struct in6_addr *)
skb->data + QETH_FAKE_LL_V6_ADDR_POS,
fake_hdr->daddr, card->dev, 0);
break;
#endif /* CONFIG_QETH_IPV6 */
case __constant_htons(ETH_P_IP):
ip_hdr = (struct iphdr *)skb->data;
ip_tr_mc_map(ip_hdr->daddr, fake_hdr->daddr);
break;
default:
memcpy(fake_hdr->daddr, card->dev->dev_addr, TR_ALEN);
}
break;
case PACKET_BROADCAST:
memset(fake_hdr->daddr, 0xff, TR_ALEN);
break;
default:
memcpy(fake_hdr->daddr, card->dev->dev_addr, TR_ALEN);
}
/* the source MAC address */
if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
memcpy(fake_hdr->saddr, &hdr->hdr.l3.dest_addr[2], TR_ALEN);
else
memset(fake_hdr->saddr, 0, TR_ALEN);
fake_hdr->rcf=0;
fake_llc = (struct trllc*)&(fake_hdr->rcf);
fake_llc->dsap = EXTENDED_SAP;
fake_llc->ssap = EXTENDED_SAP;
fake_llc->llc = UI_CMD;
fake_llc->protid[0] = 0;
fake_llc->protid[1] = 0;
fake_llc->protid[2] = 0;
fake_llc->ethertype = ETH_P_IP;
}
static inline void
qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *hdr)
{
struct ethhdr *fake_hdr;
struct iphdr *ip_hdr;
QETH_DBF_TEXT(trace,5,"skbfake");
skb->mac.raw = skb->data - QETH_FAKE_LL_LEN;
QETH_DBF_TEXT(trace,5,"skbfketh");
skb->mac.raw = skb->data - QETH_FAKE_LL_LEN_ETH;
/* this is a fake ethernet header */
fake_hdr = (struct ethhdr *) skb->mac.raw;
@ -2253,10 +2322,7 @@ qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
#endif /* CONFIG_QETH_IPV6 */
case __constant_htons(ETH_P_IP):
ip_hdr = (struct iphdr *)skb->data;
if (card->dev->type == ARPHRD_IEEE802_TR)
ip_tr_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
else
ip_eth_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
ip_eth_mc_map(ip_hdr->daddr, fake_hdr->h_dest);
break;
default:
memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN);
@ -2277,6 +2343,16 @@ qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
fake_hdr->h_proto = skb->protocol;
}
static inline void
qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *hdr)
{
if (card->dev->type == ARPHRD_IEEE802_TR)
qeth_rebuild_skb_fake_ll_tr(card, skb, hdr);
else
qeth_rebuild_skb_fake_ll_eth(card, skb, hdr);
}
static inline void
qeth_rebuild_skb_vlan(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *hdr)
@ -3440,16 +3516,25 @@ qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, void *daddr, void *saddr,
unsigned len)
{
struct ethhdr *hdr;
if(dev->type == ARPHRD_IEEE802_TR){
struct trh_hdr *hdr;
hdr = (struct trh_hdr *)skb_push(skb, QETH_FAKE_LL_LEN_TR);
memcpy(hdr->saddr, dev->dev_addr, TR_ALEN);
memcpy(hdr->daddr, "FAKELL", TR_ALEN);
return QETH_FAKE_LL_LEN_TR;
hdr = (struct ethhdr *)skb_push(skb, QETH_FAKE_LL_LEN);
memcpy(hdr->h_source, dev->dev_addr, ETH_ALEN);
memcpy(hdr->h_dest, "FAKELL", ETH_ALEN);
if (type != ETH_P_802_3)
hdr->h_proto = htons(type);
else
hdr->h_proto = htons(len);
return QETH_FAKE_LL_LEN;
} else {
struct ethhdr *hdr;
hdr = (struct ethhdr *)skb_push(skb, QETH_FAKE_LL_LEN_ETH);
memcpy(hdr->h_source, dev->dev_addr, ETH_ALEN);
memcpy(hdr->h_dest, "FAKELL", ETH_ALEN);
if (type != ETH_P_802_3)
hdr->h_proto = htons(type);
else
hdr->h_proto = htons(len);
return QETH_FAKE_LL_LEN_ETH;
}
}
static inline int
@ -3710,16 +3795,12 @@ static inline int
qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb,
struct qeth_hdr **hdr, int ipv)
{
int rc = 0;
#ifdef CONFIG_QETH_VLAN
u16 *tag;
#endif
QETH_DBF_TEXT(trace, 6, "prepskb");
rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
if (rc)
return rc;
#ifdef CONFIG_QETH_VLAN
if (card->vlangrp && vlan_tx_tag_present(*skb) &&
((ipv == 6) || card->options.layer2) ) {
@ -3882,9 +3963,15 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
memcpy(hdr->hdr.l3.dest_addr, &skb->nh.ipv6h->daddr, 16);
}
} else { /* passthrough */
if (!memcmp(skb->data + sizeof(struct qeth_hdr),
if((skb->dev->type == ARPHRD_IEEE802_TR) &&
!memcmp(skb->data + sizeof(struct qeth_hdr) +
sizeof(__u16), skb->dev->broadcast, 6)) {
hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
QETH_HDR_PASSTHRU;
} else if (!memcmp(skb->data + sizeof(struct qeth_hdr),
skb->dev->broadcast, 6)) { /* broadcast? */
hdr->hdr.l3.flags = QETH_CAST_BROADCAST | QETH_HDR_PASSTHRU;
hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
QETH_HDR_PASSTHRU;
} else {
hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
@ -3893,68 +3980,30 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
}
}
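/*
 * Why the extra sizeof(__u16) above: on token ring the frame starts with the
 * one-byte AC and FC fields, so the destination MAC sits two bytes into the
 * link-level header.  For passthrough frames the broadcast test therefore
 * compares against skb->data + sizeof(struct qeth_hdr) + 2 on TR devices,
 * but directly after the qeth header on Ethernet.
 */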
static inline void
__qeth_fill_buffer_frag(struct sk_buff *skb, struct qdio_buffer *buffer,
int *next_element_to_fill)
{
int length = skb->len;
struct skb_frag_struct *frag;
int fragno;
unsigned long addr;
int element;
int first_lap = 1;
fragno = skb_shinfo(skb)->nr_frags; /* start with last frag */
element = *next_element_to_fill + fragno;
while (length > 0) {
if (fragno > 0) {
frag = &skb_shinfo(skb)->frags[fragno - 1];
addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
frag->page_offset;
buffer->element[element].addr = (char *)addr;
buffer->element[element].length = frag->size;
length -= frag->size;
if (first_lap)
buffer->element[element].flags =
SBAL_FLAGS_LAST_FRAG;
else
buffer->element[element].flags =
SBAL_FLAGS_MIDDLE_FRAG;
} else {
buffer->element[element].addr = skb->data;
buffer->element[element].length = length;
length = 0;
buffer->element[element].flags =
SBAL_FLAGS_FIRST_FRAG;
}
element--;
fragno--;
first_lap = 0;
}
*next_element_to_fill += skb_shinfo(skb)->nr_frags + 1;
}
static inline void
__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
int *next_element_to_fill)
int is_tso, int *next_element_to_fill)
{
int length = skb->len;
int length_here;
int element;
char *data;
int first_lap = 1;
int first_lap;
element = *next_element_to_fill;
data = skb->data;
first_lap = (is_tso == 0 ? 1 : 0);
while (length > 0) {
/* length_here is the remaining amount of data in this page */
length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
if (length < length_here)
length_here = length;
buffer->element[element].addr = data;
buffer->element[element].length = length_here;
length -= length_here;
if (!length){
if (!length) {
if (first_lap)
buffer->element[element].flags = 0;
else
@ -3981,17 +4030,35 @@ qeth_fill_buffer(struct qeth_qdio_out_q *queue,
struct sk_buff *skb)
{
struct qdio_buffer *buffer;
int flush_cnt = 0;
struct qeth_hdr_tso *hdr;
int flush_cnt = 0, hdr_len, large_send = 0;
QETH_DBF_TEXT(trace, 6, "qdfillbf");
buffer = buf->buffer;
atomic_inc(&skb->users);
skb_queue_tail(&buf->skb_list, skb);
hdr = (struct qeth_hdr_tso *) skb->data;
/*check first on TSO ....*/
if (hdr->hdr.hdr.l3.id == QETH_HEADER_TYPE_TSO) {
int element = buf->next_element_to_fill;
hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
/*fill first buffer entry only with header information */
buffer->element[element].addr = skb->data;
buffer->element[element].length = hdr_len;
buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
buf->next_element_to_fill++;
skb->data += hdr_len;
skb->len -= hdr_len;
large_send = 1;
}
if (skb_shinfo(skb)->nr_frags == 0)
__qeth_fill_buffer(skb, buffer,
__qeth_fill_buffer(skb, buffer, large_send,
(int *)&buf->next_element_to_fill);
else
__qeth_fill_buffer_frag(skb, buffer,
__qeth_fill_buffer_frag(skb, buffer, large_send,
(int *)&buf->next_element_to_fill);
if (!queue->do_pack) {
@ -4183,6 +4250,25 @@ out:
return rc;
}
static inline int
qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb)
{
int elements_needed = 0;
if (skb_shinfo(skb)->nr_frags > 0) {
elements_needed = (skb_shinfo(skb)->nr_frags + 1);
}
if (elements_needed == 0 )
elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
+ skb->len) >> PAGE_SHIFT);
if (elements_needed > QETH_MAX_BUFFER_ELEMENTS(card)){
PRINT_ERR("qeth_do_send_packet: invalid size of "
"IP packet. Discarded.");
return 0;
}
return elements_needed;
}
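/*
 * Worked example for qeth_get_elements_no() above: each QDIO buffer element
 * can address data within a single page, so a linear skb needs one element
 * plus one more per page boundary it crosses.  With PAGE_SIZE = 4096, a
 * 7000-byte skb whose header starts at page offset 3000 needs
 *	1 + ((3000 + 7000) >> PAGE_SHIFT) = 1 + 2 = 3 elements,
 * while a fragmented skb simply needs nr_frags + 1 (one for the linear
 * part).  Anything beyond QETH_MAX_BUFFER_ELEMENTS(card) is rejected.
 */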
static inline int
qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
{
@ -4205,7 +4291,11 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
dev_kfree_skb_irq(skb);
return 0;
}
skb_pull(skb, QETH_FAKE_LL_LEN);
if(card->dev->type == ARPHRD_IEEE802_TR){
skb_pull(skb, QETH_FAKE_LL_LEN_TR);
} else {
skb_pull(skb, QETH_FAKE_LL_LEN_ETH);
}
}
}
cast_type = qeth_get_cast_type(card, skb);
@ -4221,19 +4311,25 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
if (skb_shinfo(skb)->tso_size)
large_send = card->options.large_send;
if ((rc = qeth_prepare_skb(card, &skb, &hdr, ipv))){
QETH_DBF_TEXT_(trace, 4, "pskbe%d", rc);
return rc;
}
/* are we able to do TSO? If so, prepare and send it from here */
if ((large_send == QETH_LARGE_SEND_TSO) &&
(cast_type == RTN_UNSPEC)) {
rc = qeth_tso_send_packet(card, skb, queue,
ipv, cast_type);
goto do_statistics;
rc = qeth_tso_prepare_packet(card, skb, ipv, cast_type);
if (rc) {
card->stats.tx_dropped++;
card->stats.tx_errors++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
elements_needed++;
} else {
if ((rc = qeth_prepare_skb(card, &skb, &hdr, ipv))) {
QETH_DBF_TEXT_(trace, 4, "pskbe%d", rc);
return rc;
}
qeth_fill_header(card, hdr, skb, ipv, cast_type);
}
qeth_fill_header(card, hdr, skb, ipv, cast_type);
if (large_send == QETH_LARGE_SEND_EDDP) {
ctx = qeth_eddp_create_context(card, skb, hdr);
if (ctx == NULL) {
@ -4241,7 +4337,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
return -EINVAL;
}
} else {
elements_needed = qeth_get_elements_no(card,(void*) hdr, skb);
elements_needed += qeth_get_elements_no(card,(void*) hdr, skb);
if (!elements_needed)
return -EINVAL;
}
@ -4252,12 +4348,12 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
else
rc = qeth_do_send_packet_fast(card, queue, skb, hdr,
elements_needed, ctx);
do_statistics:
if (!rc){
card->stats.tx_packets++;
card->stats.tx_bytes += skb->len;
#ifdef CONFIG_QETH_PERF_STATS
if (skb_shinfo(skb)->tso_size) {
if (skb_shinfo(skb)->tso_size &&
!(large_send == QETH_LARGE_SEND_NO)) {
card->perf_stats.large_send_bytes += skb->len;
card->perf_stats.large_send_cnt++;
}
@ -7154,7 +7250,7 @@ qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
}
static int
qeth_stop_card(struct qeth_card *card)
qeth_stop_card(struct qeth_card *card, int recovery_mode)
{
int rc = 0;
@ -7167,9 +7263,13 @@ qeth_stop_card(struct qeth_card *card)
if (card->read.state == CH_STATE_UP &&
card->write.state == CH_STATE_UP &&
(card->state == CARD_STATE_UP)) {
rtnl_lock();
dev_close(card->dev);
rtnl_unlock();
if(recovery_mode) {
qeth_stop(card->dev);
} else {
rtnl_lock();
dev_close(card->dev);
rtnl_unlock();
}
if (!card->use_hard_stop) {
__u8 *mac = &card->dev->dev_addr[0];
rc = qeth_layer2_send_delmac(card, mac);
@ -7341,13 +7441,17 @@ qeth_register_netdev(struct qeth_card *card)
}
static void
qeth_start_again(struct qeth_card *card)
qeth_start_again(struct qeth_card *card, int recovery_mode)
{
QETH_DBF_TEXT(setup ,2, "startag");
rtnl_lock();
dev_open(card->dev);
rtnl_unlock();
if(recovery_mode) {
qeth_open(card->dev);
} else {
rtnl_lock();
dev_open(card->dev);
rtnl_unlock();
}
/* this also sets saved unicast addresses */
qeth_set_multicast_list(card->dev);
}
@ -7404,7 +7508,7 @@ static void qeth_make_parameters_consistent(struct qeth_card *card)
static int
qeth_set_online(struct ccwgroup_device *gdev)
__qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode)
{
struct qeth_card *card = gdev->dev.driver_data;
int rc = 0;
@ -7464,12 +7568,12 @@ qeth_set_online(struct ccwgroup_device *gdev)
* we can also use this state for recovery purposes*/
qeth_set_allowed_threads(card, 0xffffffff, 0);
if (recover_flag == CARD_STATE_RECOVER)
qeth_start_again(card);
qeth_start_again(card, recovery_mode);
qeth_notify_processes();
return 0;
out_remove:
card->use_hard_stop = 1;
qeth_stop_card(card);
qeth_stop_card(card, 0);
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
@ -7480,6 +7584,12 @@ out_remove:
return -ENODEV;
}
static int
qeth_set_online(struct ccwgroup_device *gdev)
{
return __qeth_set_online(gdev, 0);
}
static struct ccw_device_id qeth_ids[] = {
{CCW_DEVICE(0x1731, 0x01), driver_info:QETH_CARD_TYPE_OSAE},
{CCW_DEVICE(0x1731, 0x05), driver_info:QETH_CARD_TYPE_IQD},

View file

@ -1,285 +0,0 @@
/*
* linux/drivers/s390/net/qeth_tso.c ($Revision: 1.6 $)
*
* Header file for qeth TCP Segmentation Offload support.
*
* Copyright 2004 IBM Corporation
*
* Author(s): Frank Pavlic <pavlic@de.ibm.com>
*
* $Revision: 1.6 $ $Date: 2005/03/24 09:04:18 $
*
*/
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_tso.h"
/**
* skb already partially prepared
* classic qdio header in skb->data
* */
static inline struct qeth_hdr_tso *
qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
{
int rc = 0;
QETH_DBF_TEXT(trace, 5, "tsoprsk");
rc = qeth_realloc_headroom(card, skb,sizeof(struct qeth_hdr_ext_tso));
if (rc)
return NULL;
return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_ext_tso));
}
/**
* fill header for a TSO packet
*/
static inline void
qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
{
struct qeth_hdr_tso *hdr;
struct tcphdr *tcph;
struct iphdr *iph;
QETH_DBF_TEXT(trace, 5, "tsofhdr");
hdr = (struct qeth_hdr_tso *) skb->data;
iph = skb->nh.iph;
tcph = skb->h.th;
/*fix header to TSO values ...*/
hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
/*set values which are fix for the first approach ...*/
hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
hdr->ext.imb_hdr_no = 1;
hdr->ext.hdr_type = 1;
hdr->ext.hdr_version = 1;
hdr->ext.hdr_len = 28;
/*insert non-fix values */
hdr->ext.mss = skb_shinfo(skb)->tso_size;
hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
sizeof(struct qeth_hdr_tso));
}
/**
* change some header values as requested by hardware
*/
static inline void
qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
{
struct iphdr *iph;
struct ipv6hdr *ip6h;
struct tcphdr *tcph;
iph = skb->nh.iph;
ip6h = skb->nh.ipv6h;
tcph = skb->h.th;
tcph->check = 0;
if (skb->protocol == ETH_P_IPV6) {
ip6h->payload_len = 0;
tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
0, IPPROTO_TCP, 0);
return;
}
/*OSA want us to set these values ...*/
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
0, IPPROTO_TCP, 0);
iph->tot_len = 0;
iph->check = 0;
}
static inline struct qeth_hdr_tso *
qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb,
int ipv, int cast_type)
{
struct qeth_hdr_tso *hdr;
int rc = 0;
QETH_DBF_TEXT(trace, 5, "tsoprep");
/*get headroom for tso qdio header */
hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb);
if (hdr == NULL) {
QETH_DBF_TEXT_(trace, 4, "2err%d", rc);
return NULL;
}
memset(hdr, 0, sizeof(struct qeth_hdr_tso));
/*fill first 32 bytes of qdio header as used
*FIXME: TSO has two struct members
* with different names but same size
* */
qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type);
qeth_tso_fill_header(card, skb);
qeth_tso_set_tcpip_header(card, skb);
return hdr;
}
static inline int
qeth_tso_get_queue_buffer(struct qeth_qdio_out_q *queue)
{
struct qeth_qdio_out_buffer *buffer;
int flush_cnt = 0;
QETH_DBF_TEXT(trace, 5, "tsobuf");
/* force to non-packing*/
if (queue->do_pack)
queue->do_pack = 0;
buffer = &queue->bufs[queue->next_buf_to_fill];
/* get a new buffer if current is already in use*/
if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
(buffer->next_element_to_fill > 0)) {
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
flush_cnt++;
}
return flush_cnt;
}
static inline void
__qeth_tso_fill_buffer_frag(struct qeth_qdio_out_buffer *buf,
struct sk_buff *skb)
{
struct skb_frag_struct *frag;
struct qdio_buffer *buffer;
int fragno, cnt, element;
unsigned long addr;
QETH_DBF_TEXT(trace, 6, "tsfilfrg");
/*initialize variables ...*/
fragno = skb_shinfo(skb)->nr_frags;
buffer = buf->buffer;
element = buf->next_element_to_fill;
/*fill buffer elements .....*/
for (cnt = 0; cnt < fragno; cnt++) {
frag = &skb_shinfo(skb)->frags[cnt];
addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
frag->page_offset;
buffer->element[element].addr = (char *)addr;
buffer->element[element].length = frag->size;
if (cnt < (fragno - 1))
buffer->element[element].flags =
SBAL_FLAGS_MIDDLE_FRAG;
else
buffer->element[element].flags =
SBAL_FLAGS_LAST_FRAG;
element++;
}
buf->next_element_to_fill = element;
}
static inline int
qeth_tso_fill_buffer(struct qeth_qdio_out_buffer *buf,
struct sk_buff *skb)
{
int length, length_here, element;
int hdr_len;
struct qdio_buffer *buffer;
struct qeth_hdr_tso *hdr;
char *data;
QETH_DBF_TEXT(trace, 3, "tsfilbuf");
/*increment user count and queue skb ...*/
atomic_inc(&skb->users);
skb_queue_tail(&buf->skb_list, skb);
/*initialize all variables...*/
buffer = buf->buffer;
hdr = (struct qeth_hdr_tso *)skb->data;
hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
data = skb->data + hdr_len;
length = skb->len - hdr_len;
element = buf->next_element_to_fill;
/*fill first buffer entry only with header information */
buffer->element[element].addr = skb->data;
buffer->element[element].length = hdr_len;
buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
buf->next_element_to_fill++;
if (skb_shinfo(skb)->nr_frags > 0) {
__qeth_tso_fill_buffer_frag(buf, skb);
goto out;
}
/*start filling buffer entries ...*/
element++;
while (length > 0) {
/* length_here is the remaining amount of data in this page */
length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
if (length < length_here)
length_here = length;
buffer->element[element].addr = data;
buffer->element[element].length = length_here;
length -= length_here;
if (!length)
buffer->element[element].flags =
SBAL_FLAGS_LAST_FRAG;
else
buffer->element[element].flags =
SBAL_FLAGS_MIDDLE_FRAG;
data += length_here;
element++;
}
/*set the buffer to primed ...*/
buf->next_element_to_fill = element;
out:
atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
return 1;
}
int
qeth_tso_send_packet(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type)
{
int flush_cnt = 0;
struct qeth_hdr_tso *hdr;
struct qeth_qdio_out_buffer *buffer;
int start_index;
QETH_DBF_TEXT(trace, 3, "tsosend");
if (!(hdr = qeth_tso_prepare_packet(card, skb, ipv, cast_type)))
return -ENOMEM;
/*check if skb fits in one SBAL ...*/
if (!(qeth_get_elements_no(card, (void*)hdr, skb)))
return -EINVAL;
/*lock queue, force switching to non-packing and send it ...*/
while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
QETH_OUT_Q_LOCKED,
&queue->state));
start_index = queue->next_buf_to_fill;
buffer = &queue->bufs[queue->next_buf_to_fill];
/*check if card is too busy ...*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
card->stats.tx_dropped++;
goto out;
}
/*let's force to non-packing and get a new SBAL*/
flush_cnt += qeth_tso_get_queue_buffer(queue);
buffer = &queue->bufs[queue->next_buf_to_fill];
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
card->stats.tx_dropped++;
goto out;
}
flush_cnt += qeth_tso_fill_buffer(buffer, skb);
queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
out:
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
if (flush_cnt)
qeth_flush_buffers(queue, 0, start_index, flush_cnt);
/*do some statistics */
card->stats.tx_packets++;
card->stats.tx_bytes += skb->len;
return 0;
}

View file

@ -1,5 +1,5 @@
/*
* linux/drivers/s390/net/qeth_tso.h ($Revision: 1.4 $)
* linux/drivers/s390/net/qeth_tso.h ($Revision: 1.7 $)
*
* Header file for qeth TCP Segmentation Offload support.
*
@ -7,52 +7,148 @@
*
* Author(s): Frank Pavlic <pavlic@de.ibm.com>
*
* $Revision: 1.4 $ $Date: 2005/03/24 09:04:18 $
* $Revision: 1.7 $ $Date: 2005/05/04 20:19:18 $
*
*/
#ifndef __QETH_TSO_H__
#define __QETH_TSO_H__
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include "qeth.h"
#include "qeth_mpc.h"
extern int
qeth_tso_send_packet(struct qeth_card *, struct sk_buff *,
struct qeth_qdio_out_q *, int , int);
struct qeth_hdr_ext_tso {
__u16 hdr_tot_len;
__u8 imb_hdr_no;
__u8 reserved;
__u8 hdr_type;
__u8 hdr_version;
__u16 hdr_len;
__u32 payload_len;
__u16 mss;
__u16 dg_hdr_len;
__u8 padding[16];
} __attribute__ ((packed));
static inline struct qeth_hdr_tso *
qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
{
QETH_DBF_TEXT(trace, 5, "tsoprsk");
return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_tso));
}
struct qeth_hdr_tso {
struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/
struct qeth_hdr_ext_tso ext;
} __attribute__ ((packed));
/**
* fill header for a TSO packet
*/
static inline void
qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
{
struct qeth_hdr_tso *hdr;
struct tcphdr *tcph;
struct iphdr *iph;
/*some helper functions*/
QETH_DBF_TEXT(trace, 5, "tsofhdr");
hdr = (struct qeth_hdr_tso *) skb->data;
iph = skb->nh.iph;
tcph = skb->h.th;
/*fix header to TSO values ...*/
hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
/*set values which are fix for the first approach ...*/
hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
hdr->ext.imb_hdr_no = 1;
hdr->ext.hdr_type = 1;
hdr->ext.hdr_version = 1;
hdr->ext.hdr_len = 28;
/*insert non-fix values */
hdr->ext.mss = skb_shinfo(skb)->tso_size;
hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
sizeof(struct qeth_hdr_tso));
}
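/*
 * Rough example of the values filled in above for an IPv4/TCP frame with
 * 20-byte IP and 20-byte TCP headers (ihl = 5, doff = 5) and an MSS of 1460
 * (all numbers illustrative):
 *	hdr->ext.mss         = 1460;
 *	hdr->ext.dg_hdr_len  = 5*4 + 5*4 = 40;
 *	hdr->ext.payload_len = skb->len - 40 - sizeof(struct qeth_hdr_tso)
 *	                     = skb->len - 40 - 64;
 * i.e. payload_len covers only the TCP payload that the adapter will split
 * into MSS-sized segments.
 */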
/**
* change some header values as requested by hardware
*/
static inline void
qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
{
struct iphdr *iph;
struct ipv6hdr *ip6h;
struct tcphdr *tcph;
iph = skb->nh.iph;
ip6h = skb->nh.ipv6h;
tcph = skb->h.th;
tcph->check = 0;
if (skb->protocol == ETH_P_IPV6) {
ip6h->payload_len = 0;
tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
0, IPPROTO_TCP, 0);
return;
}
/* OSA wants us to set these values ... */
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
0, IPPROTO_TCP, 0);
iph->tot_len = 0;
iph->check = 0;
}
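/*
 * Background for the checksum handling above: for hardware segmentation the
 * stack's final TCP checksum is replaced by the complemented pseudo-header
 * checksum (addresses, protocol, length 0), and tot_len/check of the IPv4
 * header are cleared, so the adapter can complete both checksums for every
 * segment it generates.
 */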
static inline int
qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb)
qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb,
int ipv, int cast_type)
{
int elements_needed = 0;
struct qeth_hdr_tso *hdr;
if (skb_shinfo(skb)->nr_frags > 0)
elements_needed = (skb_shinfo(skb)->nr_frags + 1);
if (elements_needed == 0 )
elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
+ skb->len) >> PAGE_SHIFT);
if (elements_needed > QETH_MAX_BUFFER_ELEMENTS(card)){
PRINT_ERR("qeth_do_send_packet: invalid size of "
"IP packet. Discarded.");
return 0;
QETH_DBF_TEXT(trace, 5, "tsoprep");
hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb);
if (hdr == NULL) {
QETH_DBF_TEXT(trace, 4, "tsoperr");
return -ENOMEM;
}
return elements_needed;
memset(hdr, 0, sizeof(struct qeth_hdr_tso));
/*fill first 32 bytes of qdio header as used
*FIXME: TSO has two struct members
* with different names but same size
* */
qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type);
qeth_tso_fill_header(card, skb);
qeth_tso_set_tcpip_header(card, skb);
return 0;
}
static inline void
__qeth_fill_buffer_frag(struct sk_buff *skb, struct qdio_buffer *buffer,
int is_tso, int *next_element_to_fill)
{
struct skb_frag_struct *frag;
int fragno;
unsigned long addr;
int element, cnt, dlen;
fragno = skb_shinfo(skb)->nr_frags;
element = *next_element_to_fill;
dlen = 0;
if (is_tso)
buffer->element[element].flags =
SBAL_FLAGS_MIDDLE_FRAG;
else
buffer->element[element].flags =
SBAL_FLAGS_FIRST_FRAG;
if ( (dlen = (skb->len - skb->data_len)) ) {
buffer->element[element].addr = skb->data;
buffer->element[element].length = dlen;
element++;
}
for (cnt = 0; cnt < fragno; cnt++) {
frag = &skb_shinfo(skb)->frags[cnt];
addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
frag->page_offset;
buffer->element[element].addr = (char *)addr;
buffer->element[element].length = frag->size;
if (cnt < (fragno - 1))
buffer->element[element].flags =
SBAL_FLAGS_MIDDLE_FRAG;
else
buffer->element[element].flags =
SBAL_FLAGS_LAST_FRAG;
element++;
}
*next_element_to_fill = element;
}
#endif /* __QETH_TSO_H__ */

View file

@@ -179,8 +179,18 @@ static void idescsi_input_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsigne
return;
}
count = min(pc->sg->length - pc->b_count, bcount);
buf = page_address(pc->sg->page) + pc->sg->offset;
drive->hwif->atapi_input_bytes(drive, buf + pc->b_count, count);
if (PageHighMem(pc->sg->page)) {
unsigned long flags;
local_irq_save(flags);
buf = kmap_atomic(pc->sg->page, KM_IRQ0) + pc->sg->offset;
drive->hwif->atapi_input_bytes(drive, buf + pc->b_count, count);
kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
local_irq_restore(flags);
} else {
buf = page_address(pc->sg->page) + pc->sg->offset;
drive->hwif->atapi_input_bytes(drive, buf + pc->b_count, count);
}
bcount -= count; pc->b_count += count;
if (pc->b_count == pc->sg->length) {
pc->sg++;
@@ -201,8 +211,18 @@ static void idescsi_output_buffers (ide_drive_t *drive, idescsi_pc_t *pc, unsign
return;
}
count = min(pc->sg->length - pc->b_count, bcount);
buf = page_address(pc->sg->page) + pc->sg->offset;
drive->hwif->atapi_output_bytes(drive, buf + pc->b_count, count);
if (PageHighMem(pc->sg->page)) {
unsigned long flags;
local_irq_save(flags);
buf = kmap_atomic(pc->sg->page, KM_IRQ0) + pc->sg->offset;
drive->hwif->atapi_output_bytes(drive, buf + pc->b_count, count);
kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
local_irq_restore(flags);
} else {
buf = page_address(pc->sg->page) + pc->sg->offset;
drive->hwif->atapi_output_bytes(drive, buf + pc->b_count, count);
}
bcount -= count; pc->b_count += count;
if (pc->b_count == pc->sg->length) {
pc->sg++;
@@ -713,7 +733,6 @@ static void idescsi_add_settings(ide_drive_t *drive)
*/
static void idescsi_setup (ide_drive_t *drive, idescsi_scsi_t *scsi)
{
DRIVER(drive)->busy++;
if (drive->id && (drive->id->config & 0x0060) == 0x20)
set_bit (IDESCSI_DRQ_INTERRUPT, &scsi->flags);
set_bit(IDESCSI_TRANSFORM, &scsi->transform);
@@ -722,17 +741,16 @@ static void idescsi_setup (ide_drive_t *drive, idescsi_scsi_t *scsi)
set_bit(IDESCSI_LOG_CMD, &scsi->log);
#endif /* IDESCSI_DEBUG_LOG */
idescsi_add_settings(drive);
DRIVER(drive)->busy--;
}
static int idescsi_cleanup (ide_drive_t *drive)
static int ide_scsi_remove(struct device *dev)
{
ide_drive_t *drive = to_ide_device(dev);
struct Scsi_Host *scsihost = drive->driver_data;
struct ide_scsi_obj *scsi = scsihost_to_idescsi(scsihost);
struct gendisk *g = scsi->disk;
if (ide_unregister_subdriver(drive))
return 1;
ide_unregister_subdriver(drive, scsi->driver);
ide_unregister_region(g);
@@ -746,7 +764,7 @@ static int idescsi_cleanup (ide_drive_t *drive)
return 0;
}
static int idescsi_attach(ide_drive_t *drive);
static int ide_scsi_probe(struct device *);
#ifdef CONFIG_PROC_FS
static ide_proc_entry_t idescsi_proc[] = {
@@ -757,24 +775,22 @@ static ide_proc_entry_t idescsi_proc[] = {
# define idescsi_proc NULL
#endif
/*
* IDE subdriver functions, registered with ide.c
*/
static ide_driver_t idescsi_driver = {
.owner = THIS_MODULE,
.name = "ide-scsi",
.gen_driver = {
.name = "ide-scsi",
.bus = &ide_bus_type,
.probe = ide_scsi_probe,
.remove = ide_scsi_remove,
},
.version = IDESCSI_VERSION,
.media = ide_scsi,
.busy = 0,
.supports_dsc_overlap = 0,
.proc = idescsi_proc,
.attach = idescsi_attach,
.cleanup = idescsi_cleanup,
.do_request = idescsi_do_request,
.end_request = idescsi_end_request,
.error = idescsi_atapi_error,
.abort = idescsi_atapi_abort,
.drives = LIST_HEAD_INIT(idescsi_driver.drives),
};
static int idescsi_ide_open(struct inode *inode, struct file *filp)
@@ -821,8 +837,6 @@ static struct block_device_operations idescsi_ops = {
.ioctl = idescsi_ide_ioctl,
};
static int idescsi_attach(ide_drive_t *drive);
static int idescsi_slave_configure(struct scsi_device * sdp)
{
/* Configure detected device */
@@ -1095,8 +1109,9 @@ static struct scsi_host_template idescsi_template = {
.proc_name = "ide-scsi",
};
static int idescsi_attach(ide_drive_t *drive)
static int ide_scsi_probe(struct device *dev)
{
ide_drive_t *drive = to_ide_device(dev);
idescsi_scsi_t *idescsi;
struct Scsi_Host *host;
struct gendisk *g;
@@ -1112,7 +1127,7 @@ static int idescsi_attach(ide_drive_t *drive)
!drive->present ||
drive->media == ide_disk ||
!(host = scsi_host_alloc(&idescsi_template,sizeof(idescsi_scsi_t))))
return 1;
return -ENODEV;
g = alloc_disk(1 << PARTN_BITS);
if (!g)
@@ -1138,20 +1153,19 @@ static int idescsi_attach(ide_drive_t *drive)
idescsi->host = host;
idescsi->disk = g;
g->private_data = &idescsi->driver;
err = ide_register_subdriver(drive, &idescsi_driver);
ide_register_subdriver(drive, &idescsi_driver);
err = 0;
idescsi_setup(drive, idescsi);
g->fops = &idescsi_ops;
ide_register_region(g);
err = scsi_add_host(host, &drive->gendev);
if (!err) {
idescsi_setup (drive, idescsi);
g->fops = &idescsi_ops;
ide_register_region(g);
err = scsi_add_host(host, &drive->gendev);
if (!err) {
scsi_scan_host(host);
return 0;
}
/* fall through on error */
ide_unregister_region(g);
ide_unregister_subdriver(drive);
scsi_scan_host(host);
return 0;
}
/* fall through on error */
ide_unregister_region(g);
ide_unregister_subdriver(drive, &idescsi_driver);
put_disk(g);
out_host_put:
@@ -1161,12 +1175,12 @@ out_host_put:
static int __init init_idescsi_module(void)
{
return ide_register_driver(&idescsi_driver);
return driver_register(&idescsi_driver.gen_driver);
}
static void __exit exit_idescsi_module(void)
{
ide_unregister_driver(&idescsi_driver);
driver_unregister(&idescsi_driver.gen_driver);
}
module_init(init_idescsi_module);

View file

@@ -2071,7 +2071,7 @@ void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
sg = qc->sg;
sg->page = virt_to_page(buf);
sg->offset = (unsigned long) buf & ~PAGE_MASK;
sg_dma_len(sg) = buflen;
sg->length = buflen;
}
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
@@ -2101,11 +2101,12 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
dma_addr_t dma_address;
dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
sg_dma_len(sg), dir);
sg->length, dir);
if (dma_mapping_error(dma_address))
return -1;
sg_dma_address(sg) = dma_address;
sg_dma_len(sg) = sg->length;
DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
@@ -2310,7 +2311,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
qc->cursect++;
qc->cursg_ofs++;
if ((qc->cursg_ofs * ATA_SECT_SIZE) == sg_dma_len(&sg[qc->cursg])) {
if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
qc->cursg++;
qc->cursg_ofs = 0;
}
@@ -2347,7 +2348,7 @@ next_page:
page = nth_page(page, (offset >> PAGE_SHIFT));
offset %= PAGE_SIZE;
count = min(sg_dma_len(sg) - qc->cursg_ofs, bytes);
count = min(sg->length - qc->cursg_ofs, bytes);
/* don't cross page boundaries */
count = min(count, (unsigned int)PAGE_SIZE - offset);
@@ -2358,7 +2359,7 @@ next_page:
qc->curbytes += count;
qc->cursg_ofs += count;
if (qc->cursg_ofs == sg_dma_len(sg)) {
if (qc->cursg_ofs == sg->length) {
qc->cursg++;
qc->cursg_ofs = 0;
}
@@ -2371,7 +2372,7 @@ next_page:
kunmap(page);
if (bytes) {
if (qc->cursg_ofs < sg_dma_len(sg))
if (qc->cursg_ofs < sg->length)
goto next_page;
goto next_sg;
}

View file

@@ -151,6 +151,8 @@ static struct ata_port_info pdc_port_info[] = {
static struct pci_device_id pdc_ata_pci_tbl[] = {
{ PCI_VENDOR_ID_PROMISE, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_2037x },
{ PCI_VENDOR_ID_PROMISE, 0x3571, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_2037x },
{ PCI_VENDOR_ID_PROMISE, 0x3373, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_2037x },
{ PCI_VENDOR_ID_PROMISE, 0x3375, PCI_ANY_ID, PCI_ANY_ID, 0, 0,

View file

@@ -82,6 +82,7 @@ static struct pci_device_id sil_pci_tbl[] = {
{ 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 },
{ 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
{ 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
{ 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
{ } /* terminate list */
};

View file

@@ -386,6 +386,8 @@ static void speedtch_poll_status(struct speedtch_instance_data *instance)
if (instance->u.atm_dev->signal != ATM_PHY_SIG_LOST) {
instance->u.atm_dev->signal = ATM_PHY_SIG_LOST;
printk(KERN_NOTICE "ADSL line is down\n");
/* It'll never resync again unless we ask it to... */
speedtch_start_synchro(instance);
}
break;

View file

@@ -52,6 +52,7 @@ extern void mf_clear_src(void);
extern void mf_init(void);
extern int mf_get_rtc(struct rtc_time *tm);
extern int mf_get_boot_rtc(struct rtc_time *tm);
extern int mf_set_rtc(struct rtc_time *tm);
#endif /* _ASM_PPC64_ISERIES_MF_H */

View file

@@ -48,6 +48,9 @@ enum ultra_tlb_layout {
extern enum ultra_tlb_layout tlb_type;
extern int cheetah_pcache_forced_on;
extern void cheetah_enable_pcache(void);
#define sparc64_highest_locked_tlbent() \
(tlb_type == spitfire ? \
SPITFIRE_HIGHEST_LOCKED_TLBENT : \

View file

@@ -21,6 +21,8 @@ struct bug_frame {
asm volatile("ud2 ; .quad %c1 ; .short %c0" :: \
"i"(__LINE__), "i" (__stringify(__FILE__)))
void out_of_line_bug(void);
#else
static inline void out_of_line_bug(void) { }
#endif
#include <asm-generic/bug.h>

View file

@@ -664,7 +664,6 @@ typedef struct ide_drive_s {
struct request *rq; /* current request */
struct ide_drive_s *next; /* circular list of hwgroup drives */
struct ide_driver_s *driver;/* (ide_driver_t *) */
void *driver_data; /* extra driver data */
struct hd_driveid *id; /* drive model identification info */
struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
@@ -758,6 +757,8 @@ typedef struct ide_drive_s {
struct semaphore gendev_rel_sem; /* to deal with device release() */
} ide_drive_t;
#define to_ide_device(dev)container_of(dev, ide_drive_t, gendev)
#define IDE_CHIPSET_PCI_MASK \
((1<<ide_pci)|(1<<ide_cmd646)|(1<<ide_ali14xx))
#define IDE_CHIPSET_IS_PCI(c) ((IDE_CHIPSET_PCI_MASK >> (c)) & 1)
@@ -1086,28 +1087,20 @@ enum {
*/
typedef struct ide_driver_s {
struct module *owner;
const char *name;
const char *version;
u8 media;
unsigned busy : 1;
unsigned supports_dsc_overlap : 1;
int (*cleanup)(ide_drive_t *);
ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
int (*end_request)(ide_drive_t *, int, int);
ide_startstop_t (*error)(ide_drive_t *, struct request *rq, u8, u8);
ide_startstop_t (*abort)(ide_drive_t *, struct request *rq);
int (*ioctl)(ide_drive_t *, struct inode *, struct file *, unsigned int, unsigned long);
ide_proc_entry_t *proc;
int (*attach)(ide_drive_t *);
void (*ata_prebuilder)(ide_drive_t *);
void (*atapi_prebuilder)(ide_drive_t *);
struct device_driver gen_driver;
struct list_head drives;
struct list_head drivers;
} ide_driver_t;
#define DRIVER(drive) ((drive)->driver)
int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsigned, unsigned long);
/*
@@ -1328,8 +1321,6 @@ extern void ide_init_subdrivers(void);
void ide_init_disk(struct gendisk *, ide_drive_t *);
extern int ata_attach(ide_drive_t *);
extern int ideprobe_init(void);
extern void ide_scan_pcibus(int scan_direction) __init;
@@ -1342,11 +1333,8 @@ extern void default_hwif_iops(ide_hwif_t *);
extern void default_hwif_mmiops(ide_hwif_t *);
extern void default_hwif_transport(ide_hwif_t *);
int ide_register_driver(ide_driver_t *driver);
void ide_unregister_driver(ide_driver_t *driver);
int ide_register_subdriver(ide_drive_t *, ide_driver_t *);
int ide_unregister_subdriver (ide_drive_t *drive);
int ide_replace_subdriver(ide_drive_t *drive, const char *driver);
void ide_register_subdriver(ide_drive_t *, ide_driver_t *);
void ide_unregister_subdriver(ide_drive_t *, ide_driver_t *);
#define ON_BOARD 1
#define NEVER_BOARD 0

View file

@@ -36,8 +36,8 @@ struct trh_hdr {
__u8 fc; /* frame control field */
__u8 daddr[TR_ALEN]; /* destination address */
__u8 saddr[TR_ALEN]; /* source address */
__u16 rcf; /* route control field */
__u16 rseg[8]; /* routing registers */
__be16 rcf; /* route control field */
__be16 rseg[8]; /* routing registers */
};
#ifdef __KERNEL__
@@ -55,7 +55,7 @@ struct trllc {
__u8 ssap; /* source SAP */
__u8 llc; /* LLC control field */
__u8 protid[3]; /* protocol id */
__u16 ethertype; /* ether type field */
__be16 ethertype; /* ether type field */
};
/* Token-Ring statistics collection data. */

View file

@@ -65,9 +65,13 @@
#define ADVERTISE_SLCT 0x001f /* Selector bits */
#define ADVERTISE_CSMA 0x0001 /* Only selector supported */
#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
#define ADVERTISE_1000XFULL 0x0020 /* Try for 1000BASE-X full-duplex */
#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
#define ADVERTISE_1000XHALF 0x0040 /* Try for 1000BASE-X half-duplex */
#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
#define ADVERTISE_1000XPAUSE 0x0080 /* Try for 1000BASE-X pause */
#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
#define ADVERTISE_1000XPSE_ASYM 0x0100 /* Try for 1000BASE-X asym pause */
#define ADVERTISE_100BASE4 0x0200 /* Try for 100mbps 4k packets */
#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymetric pause */
@@ -84,9 +88,13 @@
/* Link partner ability register. */
#define LPA_SLCT 0x001f /* Same as advertise selector */
#define LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
#define LPA_1000XFULL 0x0020 /* Can do 1000BASE-X full-duplex */
#define LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
#define LPA_1000XHALF 0x0040 /* Can do 1000BASE-X half-duplex */
#define LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
#define LPA_1000XPAUSE 0x0080 /* Can do 1000BASE-X pause */
#define LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
#define LPA_1000XPAUSE_ASYM 0x0100 /* Can do 1000BASE-X pause asym*/
#define LPA_100BASE4 0x0200 /* Can do 100mbps 4k packets */
#define LPA_PAUSE_CAP 0x0400 /* Can pause */
#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */

View file

@@ -2071,6 +2071,7 @@
#define PCI_DEVICE_ID_TIGON3_5703 0x1647
#define PCI_DEVICE_ID_TIGON3_5704 0x1648
#define PCI_DEVICE_ID_TIGON3_5704S_2 0x1649
#define PCI_DEVICE_ID_NX2_5706 0x164a
#define PCI_DEVICE_ID_TIGON3_5702FE 0x164d
#define PCI_DEVICE_ID_TIGON3_5705 0x1653
#define PCI_DEVICE_ID_TIGON3_5705_2 0x1654
@@ -2090,6 +2091,7 @@
#define PCI_DEVICE_ID_TIGON3_5702X 0x16a6
#define PCI_DEVICE_ID_TIGON3_5703X 0x16a7
#define PCI_DEVICE_ID_TIGON3_5704S 0x16a8
#define PCI_DEVICE_ID_NX2_5706S 0x16aa
#define PCI_DEVICE_ID_TIGON3_5702A3 0x16c6
#define PCI_DEVICE_ID_TIGON3_5703A3 0x16c7
#define PCI_DEVICE_ID_TIGON3_5781 0x16dd

View file

@@ -427,6 +427,7 @@ enum
TCA_NETEM_UNSPEC,
TCA_NETEM_CORR,
TCA_NETEM_DELAY_DIST,
TCA_NETEM_REORDER,
__TCA_NETEM_MAX,
};
@@ -437,7 +438,7 @@ struct tc_netem_qopt
__u32 latency; /* added delay (us) */
__u32 limit; /* fifo limit (packets) */
__u32 loss; /* random packet loss (0=none ~0=100%) */
__u32 gap; /* re-ordering gap (0 for delay all) */
__u32 gap; /* re-ordering gap (0 for none) */
__u32 duplicate; /* random packet dup (0=none ~0=100%) */
__u32 jitter; /* random jitter in latency (us) */
};
@@ -449,6 +450,12 @@ struct tc_netem_corr
__u32 dup_corr; /* duplicate correlation */
};
struct tc_netem_reorder
{
__u32 probability;
__u32 correlation;
};
#define NETEM_DIST_SCALE 8192
#endif
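A short sketch of how userspace could feed the new reorder knob (illustrative only; the values and the addattr() helper are assumptions, not part of this commit). The fixed tc_netem_qopt stays the first payload of TCA_OPTIONS, and tc_netem_reorder is appended as a TCA_NETEM_REORDER attribute nested behind it; probabilities are expressed as fractions of 2^32, with ~0 meaning 100%:
	struct tc_netem_qopt qopt = {
		.latency = 100000,	/* added delay, us (see comment above) */
		.limit   = 1000,	/* fifo limit, packets */
		.gap     = 5,		/* re-ordering gap */
	};
	struct tc_netem_reorder reorder = {
		.probability = 0x40000000,	/* roughly 25% of 2^32 */
		.correlation = 0x0ccccccc,	/* roughly 5% of 2^32 */
	};
	/* assumed helper: append both to the netlink request */
	addattr(msg, TCA_OPTIONS, &qopt, sizeof(qopt));
	addattr(msg, TCA_NETEM_REORDER, &reorder, sizeof(reorder));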

View file

@@ -515,6 +515,8 @@ struct xfrm_dst
struct dst_entry *route;
u32 route_mtu_cached;
u32 child_mtu_cached;
u32 route_cookie;
u32 path_cookie;
};
static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)

View file

@@ -47,12 +47,12 @@ static void rif_check_expire(unsigned long dummy);
* Each RIF entry we learn is kept this way
*/
struct rif_cache_s {
struct rif_cache {
unsigned char addr[TR_ALEN];
int iface;
__u16 rcf;
__u16 rseg[8];
struct rif_cache_s *next;
__be16 rcf;
__be16 rseg[8];
struct rif_cache *next;
unsigned long last_used;
unsigned char local_ring;
};
@@ -64,7 +64,7 @@ struct rif_cache_s {
* up a lot.
*/
static struct rif_cache_s *rif_table[RIF_TABLE_SIZE];
static struct rif_cache *rif_table[RIF_TABLE_SIZE];
static DEFINE_SPINLOCK(rif_lock);
@@ -249,7 +249,7 @@ void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,struct net_device *
{
int slack;
unsigned int hash;
struct rif_cache_s *entry;
struct rif_cache *entry;
unsigned char *olddata;
static const unsigned char mcast_func_addr[]
= {0xC0,0x00,0x00,0x04,0x00,0x00};
@@ -337,7 +337,7 @@ printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0],
static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
{
unsigned int hash, rii_p = 0;
struct rif_cache_s *entry;
struct rif_cache *entry;
spin_lock_bh(&rif_lock);
@@ -373,7 +373,7 @@ printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
* FIXME: We ought to keep some kind of cache size
* limiting and adjust the timers to suit.
*/
entry=kmalloc(sizeof(struct rif_cache_s),GFP_ATOMIC);
entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC);
if(!entry)
{
@@ -435,7 +435,7 @@ static void rif_check_expire(unsigned long dummy)
spin_lock_bh(&rif_lock);
for(i =0; i < RIF_TABLE_SIZE; i++) {
struct rif_cache_s *entry, **pentry;
struct rif_cache *entry, **pentry;
pentry = rif_table+i;
while((entry=*pentry) != NULL) {
@@ -467,10 +467,10 @@ static void rif_check_expire(unsigned long dummy)
#ifdef CONFIG_PROC_FS
static struct rif_cache_s *rif_get_idx(loff_t pos)
static struct rif_cache *rif_get_idx(loff_t pos)
{
int i;
struct rif_cache_s *entry;
struct rif_cache *entry;
loff_t off = 0;
for(i = 0; i < RIF_TABLE_SIZE; i++)
@@ -493,7 +493,7 @@ static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
int i;
struct rif_cache_s *ent = v;
struct rif_cache *ent = v;
++*pos;
@@ -522,7 +522,7 @@ static void rif_seq_stop(struct seq_file *seq, void *v)
static int rif_seq_show(struct seq_file *seq, void *v)
{
int j, rcf_len, segment, brdgnmb;
struct rif_cache_s *entry = v;
struct rif_cache *entry = v;
if (v == SEQ_START_TOKEN)
seq_puts(seq,

View file

@@ -113,6 +113,8 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
xdst = (struct xfrm_dst *)dst1;
xdst->route = &rt->u.dst;
if (rt->rt6i_node)
xdst->route_cookie = rt->rt6i_node->fn_sernum;
dst1->next = dst_prev;
dst_prev = dst1;
@@ -137,6 +139,8 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
dst_prev->child = &rt->u.dst;
dst->path = &rt->u.dst;
if (rt->rt6i_node)
((struct xfrm_dst *)dst)->path_cookie = rt->rt6i_node->fn_sernum;
*dst_p = dst;
dst = dst_prev;

View file

@@ -53,7 +53,6 @@
struct netem_sched_data {
struct Qdisc *qdisc;
struct sk_buff_head delayed;
struct timer_list timer;
u32 latency;
@@ -63,11 +62,12 @@ struct netem_sched_data {
u32 gap;
u32 jitter;
u32 duplicate;
u32 reorder;
struct crndstate {
unsigned long last;
unsigned long rho;
} delay_cor, loss_cor, dup_cor;
} delay_cor, loss_cor, dup_cor, reorder_cor;
struct disttable {
u32 size;
@@ -137,122 +137,68 @@ static long tabledist(unsigned long mu, long sigma,
return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
/* Put skb in the private delayed queue. */
static int netem_delay(struct Qdisc *sch, struct sk_buff *skb)
{
struct netem_sched_data *q = qdisc_priv(sch);
psched_tdiff_t td;
psched_time_t now;
PSCHED_GET_TIME(now);
td = tabledist(q->latency, q->jitter, &q->delay_cor, q->delay_dist);
/* Always queue at tail to keep packets in order */
if (likely(q->delayed.qlen < q->limit)) {
struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
PSCHED_TADD2(now, td, cb->time_to_send);
pr_debug("netem_delay: skb=%p now=%llu tosend=%llu\n", skb,
now, cb->time_to_send);
__skb_queue_tail(&q->delayed, skb);
return NET_XMIT_SUCCESS;
}
pr_debug("netem_delay: queue over limit %d\n", q->limit);
sch->qstats.overlimits++;
kfree_skb(skb);
return NET_XMIT_DROP;
}
/*
* Move a packet that is ready to send from the delay holding
* list to the underlying qdisc.
* Insert one skb into qdisc.
* Note: parent depends on return value to account for queue length.
* NET_XMIT_DROP: queue length didn't change.
* NET_XMIT_SUCCESS: one skb was queued.
*/
static int netem_run(struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
psched_time_t now;
PSCHED_GET_TIME(now);
skb = skb_peek(&q->delayed);
if (skb) {
const struct netem_skb_cb *cb
= (const struct netem_skb_cb *)skb->cb;
long delay
= PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay);
/* if more time remaining? */
if (delay > 0) {
mod_timer(&q->timer, jiffies + delay);
return 1;
}
__skb_unlink(skb, &q->delayed);
if (q->qdisc->enqueue(skb, q->qdisc)) {
sch->q.qlen--;
sch->qstats.drops++;
}
}
return 0;
}
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
struct sk_buff *skb2;
int ret;
int count = 1;
pr_debug("netem_enqueue skb=%p\n", skb);
/* Random duplication */
if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
++count;
/* Random packet drop 0 => none, ~0 => all */
if (q->loss && q->loss >= get_crandom(&q->loss_cor)) {
pr_debug("netem_enqueue: random loss\n");
if (q->loss && q->loss >= get_crandom(&q->loss_cor))
--count;
if (count == 0) {
sch->qstats.drops++;
kfree_skb(skb);
return 0; /* lie about loss so TCP doesn't know */
return NET_XMIT_DROP;
}
/* Random duplication */
if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) {
struct sk_buff *skb2;
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2 && netem_delay(sch, skb2) == NET_XMIT_SUCCESS) {
struct Qdisc *qp;
/* Since one packet can generate two packets in the
* queue, the parent's qlen accounting gets confused,
* so fix it.
*/
qp = qdisc_lookup(sch->dev, TC_H_MAJ(sch->parent));
if (qp)
qp->q.qlen++;
sch->q.qlen++;
sch->bstats.bytes += skb2->len;
sch->bstats.packets++;
} else
sch->qstats.drops++;
}
/* If doing simple delay then gap == 0 so all packets
* go into the delayed holding queue
* otherwise if doing out of order only "1 out of gap"
* packets will be delayed.
/*
* If we need to duplicate packet, then re-insert at top of the
* qdisc tree, since parent queuer expects that only one
* skb will be queued.
*/
if (q->counter < q->gap) {
if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
struct Qdisc *rootq = sch->dev->qdisc;
u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
q->duplicate = 0;
rootq->enqueue(skb2, rootq);
q->duplicate = dupsave;
}
if (q->gap == 0 /* not doing reordering */
|| q->counter < q->gap /* inside last reordering gap */
|| q->reorder < get_crandom(&q->reorder_cor)) {
psched_time_t now;
PSCHED_GET_TIME(now);
PSCHED_TADD2(now, tabledist(q->latency, q->jitter,
&q->delay_cor, q->delay_dist),
cb->time_to_send);
++q->counter;
ret = q->qdisc->enqueue(skb, q->qdisc);
} else {
/*
* Do re-ordering by putting one out of N packets at the front
* of the queue.
*/
PSCHED_GET_TIME(cb->time_to_send);
q->counter = 0;
ret = netem_delay(sch, skb);
netem_run(sch);
ret = q->qdisc->ops->requeue(skb, q->qdisc);
}
if (likely(ret == NET_XMIT_SUCCESS)) {
@@ -296,22 +242,33 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
int pending;
pending = netem_run(sch);
skb = q->qdisc->dequeue(q->qdisc);
if (skb) {
pr_debug("netem_dequeue: return skb=%p\n", skb);
sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED;
}
else if (pending) {
pr_debug("netem_dequeue: throttling\n");
sch->flags |= TCQ_F_THROTTLED;
}
const struct netem_skb_cb *cb
= (const struct netem_skb_cb *)skb->cb;
psched_time_t now;
long delay;
return skb;
/* if more time remaining? */
PSCHED_GET_TIME(now);
delay = PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay);
if (delay <= 0) {
pr_debug("netem_dequeue: return skb=%p\n", skb);
sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED;
return skb;
}
mod_timer(&q->timer, jiffies + delay);
sch->flags |= TCQ_F_THROTTLED;
if (q->qdisc->ops->requeue(skb, q->qdisc) != 0)
sch->qstats.drops++;
}
return NULL;
}
static void netem_watchdog(unsigned long arg)
@@ -328,8 +285,6 @@ static void netem_reset(struct Qdisc *sch)
struct netem_sched_data *q = qdisc_priv(sch);
qdisc_reset(q->qdisc);
skb_queue_purge(&q->delayed);
sch->q.qlen = 0;
sch->flags &= ~TCQ_F_THROTTLED;
del_timer_sync(&q->timer);
@@ -397,6 +352,19 @@ static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
return 0;
}
static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
{
struct netem_sched_data *q = qdisc_priv(sch);
const struct tc_netem_reorder *r = RTA_DATA(attr);
if (RTA_PAYLOAD(attr) != sizeof(*r))
return -EINVAL;
q->reorder = r->probability;
init_crandom(&q->reorder_cor, r->correlation);
return 0;
}
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
struct netem_sched_data *q = qdisc_priv(sch);
@@ -417,9 +385,15 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
q->jitter = qopt->jitter;
q->limit = qopt->limit;
q->gap = qopt->gap;
q->counter = 0;
q->loss = qopt->loss;
q->duplicate = qopt->duplicate;
/* for compatiablity with earlier versions.
* if gap is set, need to assume 100% probablity
*/
q->reorder = ~0;
/* Handle nested options after initial queue options.
* Should have put all options in nested format but too late now.
*/
@@ -441,6 +415,11 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt)
if (ret)
return ret;
}
if (tb[TCA_NETEM_REORDER-1]) {
ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
if (ret)
return ret;
}
}
@@ -455,11 +434,9 @@ static int netem_init(struct Qdisc *sch, struct rtattr *opt)
if (!opt)
return -EINVAL;
skb_queue_head_init(&q->delayed);
init_timer(&q->timer);
q->timer.function = netem_watchdog;
q->timer.data = (unsigned long) sch;
q->counter = 0;
q->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
if (!q->qdisc) {
@@ -491,6 +468,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
struct rtattr *rta = (struct rtattr *) b;
struct tc_netem_qopt qopt;
struct tc_netem_corr cor;
struct tc_netem_reorder reorder;
qopt.latency = q->latency;
qopt.jitter = q->jitter;
@@ -504,6 +482,11 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
cor.loss_corr = q->loss_cor.rho;
cor.dup_corr = q->dup_cor.rho;
RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);
reorder.probability = q->reorder;
reorder.correlation = q->reorder_cor.rho;
RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);
rta->rta_len = skb->tail - b;
return skb->len;

View file

@@ -1136,7 +1136,7 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
struct xfrm_dst *last;
u32 mtu;
if (!dst_check(dst->path, 0) ||
if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
(dst->dev && !netif_running(dst->dev)))
return 0;
@@ -1156,7 +1156,7 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
xdst->child_mtu_cached = mtu;
}
if (!dst_check(xdst->route, 0))
if (!dst_check(xdst->route, xdst->route_cookie))
return 0;
mtu = dst_mtu(xdst->route);
if (xdst->route_mtu_cached != mtu) {