Merge branch 'master' into upstream

Jeff Garzik 2006-09-20 00:48:28 -04:00
commit 783c99f42e
35 changed files with 385 additions and 197 deletions

View file

@@ -1189,8 +1189,6 @@ running once the system is up.
Mechanism 2.
nommconf [IA-32,X86_64] Disable use of MMCONFIG for PCI
Configuration
mmconf [IA-32,X86_64] Force MMCONFIG. This is useful
to override the builtin blacklist.
nomsi [MSI] If the PCI_MSI kernel config parameter is
enabled, this kernel boot option can be used to
disable the use of MSI interrupts system-wide.
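A usage sketch, assuming (as the hunk context suggests) that these entries sit under the pci=option[,option...] list in this file and are therefore passed as pci= sub-options on the kernel command line, with several options combined by commas:

	pci=nommconf		disable MMCONFIG-based PCI configuration access
	pci=nommconf,nomsi	additionally disable MSI interrupts system-wide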

View file

@@ -1,8 +1,8 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 18
EXTRAVERSION = -rc7
NAME=Crazed Snow-Weasel
EXTRAVERSION =
NAME=Avast! A bilge rat!
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
@@ -1082,6 +1082,7 @@ help:
@echo 'Static analysers'
@echo ' checkstack - Generate a list of stack hogs'
@echo ' namespacecheck - Name space analysis on compiled kernel'
@echo ' headers_check - Sanity check on exported headers'
@echo ''
@echo 'Kernel packaging:'
@$(MAKE) $(build)=$(package-dir) help
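A usage sketch for the target added to the help text above (the target name is taken from the help entry; invocation from the top of the source tree is assumed):

	make headers_check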

View file

@@ -956,6 +956,38 @@ efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
return 0;
}
/*
* This function checks if the entire range <start,end> is mapped with type.
*
* Note: this function only works correct if the e820 table is sorted and
* not-overlapping, which is the case
*/
int __init
e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
{
u64 start = s;
u64 end = e;
int i;
for (i = 0; i < e820.nr_map; i++) {
struct e820entry *ei = &e820.map[i];
if (type && ei->type != type)
continue;
/* is the region (part) in overlap with the current region ?*/
if (ei->addr >= end || ei->addr + ei->size <= start)
continue;
/* if the region is at the beginning of <start,end> we move
* start to the end of the region since it's ok until there
*/
if (ei->addr <= start)
start = ei->addr + ei->size;
/* if start is now at or beyond end, we're done, full
* coverage */
if (start >= end)
return 1; /* we're done */
}
return 0;
}
/*
* Find the highest page frame number we have available
*/

View file

@@ -237,11 +237,6 @@ char * __devinit pcibios_setup(char *str)
pci_probe &= ~PCI_PROBE_MMCONF;
return NULL;
}
/* override DMI blacklist */
else if (!strcmp(str, "mmconf")) {
pci_probe |= PCI_PROBE_MMCONF_FORCE;
return NULL;
}
#endif
else if (!strcmp(str, "noacpi")) {
acpi_noirq_set();

View file

@@ -12,7 +12,6 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <asm/e820.h>
#include "pci.h"
@@ -188,31 +187,9 @@ static __init void unreachable_devices(void)
}
}
static int disable_mcfg(struct dmi_system_id *d)
{
printk("PCI: %s detected. Disabling MCFG.\n", d->ident);
pci_probe &= ~PCI_PROBE_MMCONF;
return 0;
}
static struct dmi_system_id __initdata dmi_bad_mcfg[] = {
/* Has broken MCFG table that makes the system hang when used */
{
.callback = disable_mcfg,
.ident = "Intel D3C5105 SDV",
.matches = {
DMI_MATCH(DMI_BIOS_VENDOR, "Intel"),
DMI_MATCH(DMI_BOARD_NAME, "D26928"),
},
},
{}
};
void __init pci_mmcfg_init(void)
{
dmi_check_system(dmi_bad_mcfg);
if ((pci_probe & (PCI_PROBE_MMCONF_FORCE|PCI_PROBE_MMCONF)) == 0)
if ((pci_probe & PCI_PROBE_MMCONF) == 0)
return;
acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
@@ -221,6 +198,15 @@ void __init pci_mmcfg_init(void)
(pci_mmcfg_config[0].base_address == 0))
return;
if (!e820_all_mapped(pci_mmcfg_config[0].base_address,
pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
E820_RESERVED)) {
printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
pci_mmcfg_config[0].base_address);
printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
return;
}
printk(KERN_INFO "PCI: Using MMCONFIG\n");
raw_pci_ops = &pci_mmcfg;
pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;

View file

@@ -16,8 +16,7 @@
#define PCI_PROBE_CONF1 0x0002
#define PCI_PROBE_CONF2 0x0004
#define PCI_PROBE_MMCONF 0x0008
#define PCI_PROBE_MMCONF_FORCE 0x0010
#define PCI_PROBE_MASK 0x00ff
#define PCI_PROBE_MASK 0x000f
#define PCI_NO_SORT 0x0100
#define PCI_BIOS_SORT 0x0200

View file

@@ -108,6 +108,35 @@ e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
return 0;
}
/*
* This function checks if the entire range <start,end> is mapped with type.
*
* Note: this function only works correct if the e820 table is sorted and
* not-overlapping, which is the case
*/
int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
{
int i;
for (i = 0; i < e820.nr_map; i++) {
struct e820entry *ei = &e820.map[i];
if (type && ei->type != type)
continue;
/* is the region (part) in overlap with the current region ?*/
if (ei->addr >= end || ei->addr + ei->size <= start)
continue;
/* if the region is at the beginning of <start,end> we move
* start to the end of the region since it's ok until there
*/
if (ei->addr <= start)
start = ei->addr + ei->size;
/* if start is now at or beyond end, we're done, full coverage */
if (start >= end)
return 1; /* we're done */
}
return 0;
}
/*
* Find a free area in a specific range.
*/

View file

@@ -9,7 +9,6 @@
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/dmi.h>
#include <asm/e820.h>
#include "pci.h"
@@ -165,33 +164,11 @@ static __init void unreachable_devices(void)
}
}
static int disable_mcfg(struct dmi_system_id *d)
{
printk("PCI: %s detected. Disabling MCFG.\n", d->ident);
pci_probe &= ~PCI_PROBE_MMCONF;
return 0;
}
static struct dmi_system_id __initdata dmi_bad_mcfg[] = {
/* Has broken MCFG table that makes the system hang when used */
{
.callback = disable_mcfg,
.ident = "Intel D3C5105 SDV",
.matches = {
DMI_MATCH(DMI_BIOS_VENDOR, "Intel"),
DMI_MATCH(DMI_BOARD_NAME, "D26928"),
},
},
{}
};
void __init pci_mmcfg_init(void)
{
int i;
dmi_check_system(dmi_bad_mcfg);
if ((pci_probe & (PCI_PROBE_MMCONF|PCI_PROBE_MMCONF_FORCE)) == 0)
if ((pci_probe & PCI_PROBE_MMCONF) == 0)
return;
acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
@@ -200,6 +177,15 @@ void __init pci_mmcfg_init(void)
(pci_mmcfg_config[0].base_address == 0))
return;
if (!e820_all_mapped(pci_mmcfg_config[0].base_address,
pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
E820_RESERVED)) {
printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
pci_mmcfg_config[0].base_address);
printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
return;
}
/* RED-PEN i386 doesn't do _nocache right now */
pci_mmcfg_virt = kmalloc(sizeof(*pci_mmcfg_virt) * pci_mmcfg_config_num, GFP_KERNEL);
if (pci_mmcfg_virt == NULL) {

View file

@@ -1928,7 +1928,9 @@ he_service_rbrq(struct he_dev *he_dev, int group)
#ifdef notdef
ATM_SKB(skb)->vcc = vcc;
#endif
spin_unlock(&he_dev->global_lock);
vcc->push(vcc, skb);
spin_lock(&he_dev->global_lock);
atomic_inc(&vcc->stats->rx);

View file

@@ -908,7 +908,7 @@ static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
static int __xipram xip_wait_for_operation(
struct map_info *map, struct flchip *chip,
unsigned long adr, int *chip_op_time )
unsigned long adr, unsigned int chip_op_time )
{
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
@@ -917,7 +917,7 @@ static int __xipram xip_wait_for_operation(
flstate_t oldstate, newstate;
start = xip_currtime();
usec = *chip_op_time * 8;
usec = chip_op_time * 8;
if (usec == 0)
usec = 500000;
done = 0;
@@ -1027,8 +1027,8 @@ static int __xipram xip_wait_for_operation(
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
INVALIDATE_CACHED_RANGE(map, from, size)
#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, p_usec) \
xip_wait_for_operation(map, chip, cmd_adr, p_usec)
#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
xip_wait_for_operation(map, chip, cmd_adr, usec)
#else
@@ -1040,64 +1040,64 @@ static int __xipram xip_wait_for_operation(
static int inval_cache_and_wait_for_operation(
struct map_info *map, struct flchip *chip,
unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
int *chip_op_time )
unsigned int chip_op_time)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status, status_OK = CMD(0x80);
int z, chip_state = chip->state;
unsigned long timeo;
int chip_state = chip->state;
unsigned int timeo, sleep_time;
spin_unlock(chip->mutex);
if (inval_len)
INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
if (*chip_op_time)
cfi_udelay(*chip_op_time);
spin_lock(chip->mutex);
timeo = *chip_op_time * 8 * HZ / 1000000;
if (timeo < HZ/2)
timeo = HZ/2;
timeo += jiffies;
/* set our timeout to 8 times the expected delay */
timeo = chip_op_time * 8;
if (!timeo)
timeo = 500000;
sleep_time = chip_op_time / 2;
z = 0;
for (;;) {
if (chip->state != chip_state) {
/* Someone's suspended the operation: sleep */
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock(chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
spin_lock(chip->mutex);
continue;
}
status = map_read(map, cmd_adr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
/* OK Still waiting */
if (time_after(jiffies, timeo)) {
if (!timeo) {
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
return -ETIME;
}
/* Latency issues. Drop the lock, wait a while and retry */
z++;
/* OK Still waiting. Drop the lock, wait a while and retry. */
spin_unlock(chip->mutex);
cfi_udelay(1);
if (sleep_time >= 1000000/HZ) {
/*
* Half of the normal delay still remaining
* can be performed with a sleeping delay instead
* of busy waiting.
*/
msleep(sleep_time/1000);
timeo -= sleep_time;
sleep_time = 1000000/HZ;
} else {
udelay(1);
cond_resched();
timeo--;
}
spin_lock(chip->mutex);
}
if (!z) {
if (!--(*chip_op_time))
*chip_op_time = 1;
} else if (z > 1)
++(*chip_op_time);
if (chip->state != chip_state) {
/* Someone's suspended the operation: sleep */
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
spin_unlock(chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
spin_lock(chip->mutex);
}
}
/* Done and happy. */
chip->state = FL_STATUS;
@@ -1107,8 +1107,7 @@ static int inval_cache_and_wait_for_operation(
#endif
#define WAIT_TIMEOUT(map, chip, adr, udelay) \
({ int __udelay = (udelay); \
INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, &__udelay); })
INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
@@ -1332,7 +1331,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
adr, map_bankwidth(map),
&chip->word_write_time);
chip->word_write_time);
if (ret) {
xip_enable(map, chip, adr);
printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
@@ -1569,7 +1568,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
adr, len,
&chip->buffer_write_time);
chip->buffer_write_time);
if (ret) {
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
@@ -1704,7 +1703,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
adr, len,
&chip->erase_time);
chip->erase_time);
if (ret) {
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;

View file

@@ -45,9 +45,11 @@
#define MAX_WORD_RETRIES 3
#define MANUFACTURER_AMD 0x0001
#define MANUFACTURER_ATMEL 0x001F
#define MANUFACTURER_SST 0x00BF
#define SST49LF004B 0x0060
#define SST49LF008A 0x005a
#define AT49BV6416 0x00d6
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -68,6 +70,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"
static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
.probe = NULL, /* Not usable directly */
.destroy = cfi_amdstd_destroy,
@@ -161,6 +166,26 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
}
}
/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
struct cfi_pri_atmel atmel_pri;
memcpy(&atmel_pri, extp, sizeof(atmel_pri));
memset((char *)extp + 5, 0, sizeof(*extp) - 5);
if (atmel_pri.Features & 0x02)
extp->EraseSuspend = 2;
if (atmel_pri.BottomBoot)
extp->TopBottom = 2;
else
extp->TopBottom = 3;
}
static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
/* Setup for chips with a secsi area */
@@ -179,6 +204,16 @@ static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
}
/*
* Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
* locked by default.
*/
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
mtd->lock = cfi_atmel_lock;
mtd->unlock = cfi_atmel_unlock;
}
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef AMD_BOOTLOC_BUG
{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
@@ -192,6 +227,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
#if !FORCE_WORD_WRITE
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
@@ -207,6 +243,7 @@ static struct cfi_fixup fixup_table[] = {
* we know that is the case.
*/
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
{ 0, 0, NULL, NULL }
};
@@ -1607,6 +1644,80 @@ static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
return 0;
}
static int do_atmel_lock(struct map_info *map, struct flchip *chip,
unsigned long adr, int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
int ret;
spin_lock(chip->mutex);
ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
if (ret)
goto out_unlock;
chip->state = FL_LOCKING;
DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
__func__, adr, len);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
cfi->device_type, NULL);
map_write(map, CMD(0x40), chip->start + adr);
chip->state = FL_READY;
put_chip(map, chip, adr + chip->start);
ret = 0;
out_unlock:
spin_unlock(chip->mutex);
return ret;
}
static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
unsigned long adr, int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
int ret;
spin_lock(chip->mutex);
ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
if (ret)
goto out_unlock;
chip->state = FL_UNLOCKING;
DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
__func__, adr, len);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
map_write(map, CMD(0x70), adr);
chip->state = FL_READY;
put_chip(map, chip, adr + chip->start);
ret = 0;
out_unlock:
spin_unlock(chip->mutex);
return ret;
}
static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}
static void cfi_amdstd_sync (struct mtd_info *mtd)
{

View file

@@ -111,6 +111,7 @@
#define MX29LV040C 0x004F
#define MX29LV160T 0x22C4
#define MX29LV160B 0x2249
#define MX29F040 0x00A4
#define MX29F016 0x00AD
#define MX29F002T 0x00B0
#define MX29F004T 0x0045
@@ -1171,6 +1172,19 @@ static const struct amd_flash_info jedec_table[] = {
ERASEINFO(0x10000,31)
}
}, {
.mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29F040,
.name = "Macronix MX29F040",
.uaddr = {
[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
},
.DevSize = SIZE_512KiB,
.CmdSet = P_ID_AMD_STD,
.NumEraseRegions= 1,
.regions = {
ERASEINFO(0x10000,8),
}
}, {
.mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29F016,
.name = "Macronix MX29F016",

View file

@@ -18,6 +18,7 @@
#include <linux/mtd/mtd.h>
#include <linux/buffer_head.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#define VERSION "$Revision: 1.30 $"
@@ -236,6 +237,8 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
}
return 0;
}
static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
@@ -299,6 +302,19 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
/* Get a handle on the device */
bdev = open_bdev_excl(devname, O_RDWR, NULL);
#ifndef MODULE
if (IS_ERR(bdev)) {
/* We might not have rootfs mounted at this point. Try
to resolve the device name by other means. */
dev_t dev = name_to_dev_t(devname);
if (dev != 0) {
bdev = open_by_devnum(dev, FMODE_WRITE | FMODE_READ);
}
}
#endif
if (IS_ERR(bdev)) {
ERROR("error: cannot open device %s", devname);
goto devinit_err;
@@ -393,26 +409,6 @@ static int parse_num(size_t *num, const char *token)
}
static int parse_name(char **pname, const char *token, size_t limit)
{
size_t len;
char *name;
len = strlen(token) + 1;
if (len > limit)
return -ENOSPC;
name = kmalloc(len, GFP_KERNEL);
if (!name)
return -ENOMEM;
strcpy(name, token);
*pname = name;
return 0;
}
static inline void kill_final_newline(char *str)
{
char *newline = strrchr(str, '\n');
@@ -426,9 +422,15 @@ static inline void kill_final_newline(char *str)
return 0; \
} while (0)
static int block2mtd_setup(const char *val, struct kernel_param *kp)
#ifndef MODULE
static int block2mtd_init_called = 0;
static __initdata char block2mtd_paramline[80 + 12]; /* 80 for device, 12 for erase size */
#endif
static int block2mtd_setup2(const char *val)
{
char buf[80+12]; /* 80 for device, 12 for erase size */
char buf[80 + 12]; /* 80 for device, 12 for erase size */
char *str = buf;
char *token[2];
char *name;
@@ -450,13 +452,9 @@ static int block2mtd_setup(const char *val, struct kernel_param *kp)
if (!token[0])
parse_err("no argument");
ret = parse_name(&name, token[0], 80);
if (ret == -ENOMEM)
parse_err("out of memory");
if (ret == -ENOSPC)
parse_err("name too long");
if (ret)
return 0;
name = token[0];
if (strlen(name) + 1 > 80)
parse_err("device name too long");
if (token[1]) {
ret = parse_num(&erase_size, token[1]);
@@ -472,13 +470,48 @@ static int block2mtd_setup(const char *val, struct kernel_param *kp)
}
static int block2mtd_setup(const char *val, struct kernel_param *kp)
{
#ifdef MODULE
return block2mtd_setup2(val);
#else
/* If more parameters are later passed in via
/sys/module/block2mtd/parameters/block2mtd
and block2mtd_init() has already been called,
we can parse the argument now. */
if (block2mtd_init_called)
return block2mtd_setup2(val);
/* During early boot stage, we only save the parameters
here. We must parse them later: if the param passed
from kernel boot command line, block2mtd_setup() is
called so early that it is not possible to resolve
the device (even kmalloc() fails). Deter that work to
block2mtd_setup2(). */
strlcpy(block2mtd_paramline, val, sizeof(block2mtd_paramline));
return 0;
#endif
}
module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
static int __init block2mtd_init(void)
{
int ret = 0;
INFO("version " VERSION);
return 0;
#ifndef MODULE
if (strlen(block2mtd_paramline))
ret = block2mtd_setup2(block2mtd_paramline);
block2mtd_init_called = 1;
#endif
return ret;
}
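A usage sketch for the parameter wired up above (format taken from the MODULE_PARM_DESC string; the device path and erase size are hypothetical examples):

	modprobe block2mtd block2mtd=/dev/sdb1,65536

When the driver is built in, the same device[,erasesize] string is supplied via the block2mtd parameter on the kernel boot command line instead, which is the early-boot case the deferred parsing above exists to handle.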

View file

@@ -406,13 +406,13 @@ struct flash_info {
static struct flash_info __devinitdata m25p_data [] = {
/* REVISIT: fill in JEDEC ids, for parts that have them */
{ "m25p05", 0x05, 0x0000, 32 * 1024, 2 },
{ "m25p10", 0x10, 0x0000, 32 * 1024, 4 },
{ "m25p20", 0x11, 0x0000, 64 * 1024, 4 },
{ "m25p40", 0x12, 0x0000, 64 * 1024, 8 },
{ "m25p05", 0x05, 0x2010, 32 * 1024, 2 },
{ "m25p10", 0x10, 0x2011, 32 * 1024, 4 },
{ "m25p20", 0x11, 0x2012, 64 * 1024, 4 },
{ "m25p40", 0x12, 0x2013, 64 * 1024, 8 },
{ "m25p80", 0x13, 0x0000, 64 * 1024, 16 },
{ "m25p16", 0x14, 0x0000, 64 * 1024, 32 },
{ "m25p32", 0x15, 0x0000, 64 * 1024, 64 },
{ "m25p16", 0x14, 0x2015, 64 * 1024, 32 },
{ "m25p32", 0x15, 0x2016, 64 * 1024, 64 },
{ "m25p64", 0x16, 0x2017, 64 * 1024, 128 },
};

View file

@@ -99,10 +99,6 @@
#include <asm/system.h>
#include <linux/pci.h>
#ifndef CONFIG_PCI
#error Enable PCI in your kernel config
#endif
#include <linux/mtd/mtd.h>
#include <linux/mtd/pmc551.h>
#include <linux/mtd/compatmac.h>

View file

@@ -13,13 +13,13 @@ config MTD_COMPLEX_MAPPINGS
config MTD_PHYSMAP
tristate "CFI Flash device in physical memory map"
depends on MTD_CFI
depends on MTD_CFI || MTD_JEDECPROBE || MTD_ROM
help
This provides a 'mapping' driver which allows the CFI probe and
command set driver code to communicate with flash chips which
are mapped physically into the CPU's memory. You will need to
configure the physical address and size of the flash chips on
your particular board as well as the bus width, either statically
This provides a 'mapping' driver which allows the NOR Flash and
ROM driver code to communicate with chips which are mapped
physically into the CPU's memory. You will need to configure
the physical address and size of the flash chips on your
particular board as well as the bus width, either statically
with config options or at run-time.
config MTD_PHYSMAP_START
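A static-configuration sketch matching the help text above (MTD_PHYSMAP_START appears in this file; the companion symbols MTD_PHYSMAP_LEN and MTD_PHYSMAP_BANKWIDTH, and all of the values shown, are assumptions for illustration):

	CONFIG_MTD_PHYSMAP=y
	CONFIG_MTD_PHYSMAP_START=0x8000000
	CONFIG_MTD_PHYSMAP_LEN=0x1000000
	CONFIG_MTD_PHYSMAP_BANKWIDTH=2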

View file

@@ -62,15 +62,12 @@ static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
struct mtd_info *mtd = mfi->mtd;
switch (orig) {
case 0:
/* SEEK_SET */
case SEEK_SET:
break;
case 1:
/* SEEK_CUR */
case SEEK_CUR:
offset += file->f_pos;
break;
case 2:
/* SEEK_END */
case SEEK_END:
offset += mtd->size;
break;
default:

View file

@@ -11,7 +11,7 @@ config MTD_NAND
help
This enables support for accessing all type of NAND flash
devices. For further information see
<http://www.linux-mtd.infradead.org/tech/nand.html>.
<http://www.linux-mtd.infradead.org/doc/nand.html>.
config MTD_NAND_VERIFY_WRITE
bool "Verify NAND page writes"

View file

@@ -2224,7 +2224,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
}
/* Try to identify manufacturer */
for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_id++) {
for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
if (nand_manuf_ids[maf_idx].id == *maf_id)
break;
}

View file

@@ -78,7 +78,7 @@ static struct mtd_partition sharpsl_nand_default_partition_info[] = {
/*
* hardware specific access to control-lines
* ctrl:
* NAND_CNE: bit 0 -> bit 0 & 4
* NAND_CNE: bit 0 -> ! bit 0 & 4
* NAND_CLE: bit 1 -> bit 1
* NAND_ALE: bit 2 -> bit 2
*
@@ -92,7 +92,10 @@ static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd,
unsigned char bits = ctrl & 0x07;
bits |= (ctrl & 0x01) << 4;
writeb((readb(FLASHCTL) & 0x17) | bits, FLASHCTL);
bits ^= 0x11;
writeb((readb(FLASHCTL) & ~0x17) | bits, FLASHCTL);
}
if (cmd != NAND_CMD_NONE)

View file

@@ -539,7 +539,6 @@ unsigned long ext2_count_free (struct buffer_head * map, unsigned int numchars)
#endif /* EXT2FS_DEBUG */
/* Superblock must be locked */
unsigned long ext2_count_free_blocks (struct super_block * sb)
{
struct ext2_group_desc * desc;

View file

@@ -637,7 +637,6 @@ fail:
return ERR_PTR(err);
}
/* Superblock must be locked */
unsigned long ext2_count_free_inodes (struct super_block * sb)
{
struct ext2_group_desc *desc;

View file

@@ -1083,7 +1083,6 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
unsigned long overhead;
int i;
lock_super(sb);
if (test_opt (sb, MINIX_DF))
overhead = 0;
else {
@@ -1124,7 +1123,6 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
buf->f_files = le32_to_cpu(sbi->s_es->s_inodes_count);
buf->f_ffree = ext2_count_free_inodes (sb);
buf->f_namelen = EXT2_NAME_LEN;
unlock_super(sb);
return 0;
}

View file

@@ -21,6 +21,9 @@
#include <linux/pagemap.h>
#include "nodelist.h"
static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c,
struct jffs2_node_frag *this);
void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list)
{
struct jffs2_full_dirent **prev = list;
@@ -87,7 +90,8 @@ void jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list, uint
}
}
void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this)
static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c,
struct jffs2_node_frag *this)
{
if (this->node) {
this->node->frags--;

View file

@@ -334,7 +334,6 @@ void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c_delete);
struct rb_node *rb_next(struct rb_node *);
struct rb_node *rb_prev(struct rb_node *);
void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root);
void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this);
int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn);
void jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size);
int jffs2_add_older_frag_to_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn);

View file

@@ -1215,7 +1215,6 @@ int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xatt
rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XATTR_SIZE);
if (rc) {
JFFS2_WARNING("jffs2_reserve_space_gc()=%d, request=%u\n", rc, totlen);
rc = rc ? rc : -EBADFD;
goto out;
}
rc = save_xattr_datum(c, xd);

View file

@@ -970,7 +970,7 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, st
status = -ENOMEM;
opendata = nfs4_opendata_alloc(dentry, sp, flags, sattr);
if (opendata == NULL)
goto err_put_state_owner;
goto err_release_rwsem;
status = _nfs4_proc_open(opendata);
if (status != 0)
@@ -989,11 +989,11 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, st
return 0;
err_opendata_free:
nfs4_opendata_free(opendata);
err_release_rwsem:
up_read(&clp->cl_sem);
err_put_state_owner:
nfs4_put_state_owner(sp);
out_err:
/* Note: clp->cl_sem must be released before nfs4_put_open_state()! */
up_read(&clp->cl_sem);
*res = NULL;
return status;
}

View file

@@ -204,9 +204,11 @@ static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode,
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
spin_unlock(&inode->i_lock);
nfs_readpage_truncate_uninitialised_page(rdata);
if (rdata->res.eof || rdata->res.count == rdata->args.count)
if (rdata->res.eof || rdata->res.count == rdata->args.count) {
SetPageUptodate(page);
if (rdata->res.eof && count != 0)
memclear_highpage_flush(page, rdata->args.pgbase, count);
}
result = 0;
io_error:

View file

@@ -590,8 +590,8 @@ static void nfs_cancel_commit_list(struct list_head *head)
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_inode_remove_request(req);
nfs_clear_page_writeback(req);
dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
nfs_clear_page_writeback(req);
}
}
@@ -1386,8 +1386,8 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_mark_request_commit(req);
nfs_clear_page_writeback(req);
dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
nfs_clear_page_writeback(req);
}
return -ENOMEM;
}

View file

@@ -2,6 +2,8 @@
#define _M68K_PAGE_H
#ifdef __KERNEL__
/* PAGE_SHIFT determines the page size */
#ifndef CONFIG_SUN3
#define PAGE_SHIFT (12)
@@ -15,8 +17,6 @@
#endif
#define PAGE_MASK (~(PAGE_SIZE-1))
#ifdef __KERNEL__
#include <asm/setup.h>
#if PAGE_SHIFT < 13
@@ -175,8 +175,8 @@ static inline void *__va(unsigned long x)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#endif /* __KERNEL__ */
#include <asm-generic/page.h>
#endif /* __KERNEL__ */
#endif /* _M68K_PAGE_H */

View file

@@ -1,22 +1,14 @@
#ifndef _PARISC_PAGE_H
#define _PARISC_PAGE_H
#if !defined(__KERNEL__)
/* this is for userspace applications (4k page size) */
# define PAGE_SHIFT 12 /* 4k */
# define PAGE_SIZE (1UL << PAGE_SHIFT)
# define PAGE_MASK (~(PAGE_SIZE-1))
#endif
#ifdef __KERNEL__
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define PAGE_SHIFT 12 /* 4k */
# define PAGE_SHIFT 12
#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
# define PAGE_SHIFT 14 /* 16k */
# define PAGE_SHIFT 14
#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
# define PAGE_SHIFT 16 /* 64k */
# define PAGE_SHIFT 16
#else
# error "unknown default kernel page size"
#endif
@@ -188,9 +180,9 @@ extern int npmem_ranges;
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#endif /* __KERNEL__ */
#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
#endif /* __KERNEL__ */
#endif /* _PARISC_PAGE_H */

View file

@@ -199,6 +199,18 @@ struct cfi_pri_amdstd {
uint8_t TopBottom;
} __attribute__((packed));
/* Vendor-Specific PRI for Atmel chips (command set 0x0002) */
struct cfi_pri_atmel {
uint8_t pri[3];
uint8_t MajorVersion;
uint8_t MinorVersion;
uint8_t Features;
uint8_t BottomBoot;
uint8_t BurstMode;
uint8_t PageMode;
} __attribute__((packed));
struct cfi_pri_query {
uint8_t NumFields;
uint32_t ProtField[1]; /* Not host ordered */
@@ -464,6 +476,7 @@ struct cfi_fixup {
#define CFI_ID_ANY 0xffff
#define CFI_MFR_AMD 0x0001
#define CFI_MFR_ATMEL 0x001F
#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);

View file

@@ -252,7 +252,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
mask_ack_irq(desc, irq);
if (unlikely(desc->status & IRQ_INPROGRESS))
goto out;
goto out_unlock;
desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
kstat_cpu(cpu).irqs[irq]++;
@@ -263,7 +263,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
action = desc->action;
if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
desc->status |= IRQ_PENDING;
goto out;
goto out_unlock;
}
desc->status |= IRQ_INPROGRESS;
@@ -276,9 +276,9 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out:
if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
desc->chip->unmask(irq);
out_unlock:
spin_unlock(&desc->lock);
}

View file

@@ -1281,18 +1281,18 @@ static inline int check_leaf(struct trie *t, struct leaf *l,
struct fib_result *res)
{
int err, i;
t_key mask;
__be32 mask;
struct leaf_info *li;
struct hlist_head *hhead = &l->list;
struct hlist_node *node;
hlist_for_each_entry_rcu(li, node, hhead, hlist) {
i = li->plen;
mask = ntohl(inet_make_mask(i));
if (l->key != (key & mask))
mask = inet_make_mask(i);
if (l->key != (key & ntohl(mask)))
continue;
if ((err = fib_semantic_match(&li->falh, flp, res, l->key, mask, i)) <= 0) {
if ((err = fib_semantic_match(&li->falh, flp, res, htonl(l->key), mask, i)) <= 0) {
*plen = i;
#ifdef CONFIG_IP_FIB_TRIE_STATS
t->stats.semantic_match_passed++;

View file

@@ -11,6 +11,8 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
MODULE_ALIAS("ipt_quota");
MODULE_ALIAS("ip6t_quota");
static DEFINE_SPINLOCK(quota_lock);