Merge tag 'for-linus-20171120' of git://git.infradead.org/linux-mtd

Pull MTD updates from Richard Weinberger:
 "General changes:
   -  Unconfuse get_unmapped_area and point/unpoint driver methods
   -  New partition parser: sharpslpart
   -  Kill GENERIC_IO
   -  Various fixes

  NAND changes:
   -  Add a flag to mark NANDs that require 3 address cycles to encode a
      page address
   -  Set a default ECC/free layout when NAND_ECC_NONE is requested
   -  Fix a bug in panic_nand_write()
   -  Another batch of cleanups for the denali driver
   -  Fix PM support in the atmel driver
   -  Remove support for platform data in the omap driver
   -  Fix subpage write in the omap driver
   -  Fix irq handling in the mtk driver
   -  Change link order of mtk_ecc and mtk_nand drivers to speed up boot
      time
   -  Change log level of ECC error messages in the mxc driver
   -  Patch the pxa3xx driver to support Armada 8k platforms
   -  Add BAM DMA support to the qcom driver
   -  Convert gpio-nand to the GPIO desc API
   -  Fix ECC handling in the mt29f driver

  SPI-NOR changes:
   -  Introduce system power management support
   -  New mechanism to select the proper .quad_enable() hook by JEDEC
      ID, when needed, instead of only by manufacturer ID
   -  Add support for new memory parts from Gigadevice, Winbond, Macronix
      and Everspin
   -  Maintenance for Cadence, Intel, Mediatek and STM32 drivers"

*  tag 'for-linus-20171120' of git://git.infradead.org/linux-mtd: (85 commits)
  mtd: Avoid probe failures when mtd->dbg.dfs_dir is invalid
  mtd: sharpslpart: Add sharpslpart partition parser
  mtd: Add sanity checks in mtd_write/read_oob()
  mtd: remove the get_unmapped_area method
  mtd: implement mtd_get_unmapped_area() using the point method
  mtd: chips/map_rom.c: implement point and unpoint methods
  mtd: chips/map_ram.c: implement point and unpoint methods
  mtd: mtdram: properly handle the phys argument in the point method
  mtd: mtdswap: fix spelling mistake: 'TRESHOLD' -> 'THRESHOLD'
  mtd: slram: use memremap() instead of ioremap()
  kconfig: kill off GENERIC_IO option
  mtd: Fix C++ comment in include/linux/mtd/mtd.h
  mtd: constify mtd_partition
  mtd: plat-ram: Replace manual resource management by devm
  mtd: nand: Fix writing mtdoops to nand flash.
  mtd: intel-spi: Add Intel Lewisburg PCH SPI super SKU PCI ID
  mtd: nand: mtk: fix infinite ECC decode IRQ issue
  mtd: spi-nor: Add support for mr25h128
  mtd: nand: mtk: change the compile sequence of mtk_nand.o and mtk_ecc.o
  mtd: spi-nor: enable 4B opcodes for mx66l51235l
  ...
Linus Torvalds 2017-11-22 20:46:06 -10:00
commit 14b661ebb6
67 changed files with 1650 additions and 756 deletions

View File

@ -1,7 +1,9 @@
* Cadence Quad SPI controller
Required properties:
- compatible : Should be "cdns,qspi-nor".
- compatible : should be one of the following:
Generic default - "cdns,qspi-nor".
For TI 66AK2G SoC - "ti,k2g-qspi", "cdns,qspi-nor".
- reg : Contains two entries, each of which is a tuple consisting of a
physical address and length. The first entry is the address and
length of the controller register set. The second entry is the
@ -14,6 +16,9 @@ Required properties:
Optional properties:
- cdns,is-decoded-cs : Flag to indicate whether decoder is used or not.
- cdns,rclk-en : Flag to indicate that QSPI return clock is used to latch
the read data rather than the QSPI clock. Make sure that QSPI return
clock is populated on the board before using this property.
Optional subnodes:
Subnodes of the Cadence Quad SPI controller are spi slave nodes with additional

View File

@ -29,7 +29,7 @@ nand: nand@ff900000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "altr,socfpga-denali-nand";
reg = <0xff900000 0x100000>, <0xffb80000 0x10000>;
reg = <0xff900000 0x20>, <0xffb80000 0x1000>;
reg-names = "nand_data", "denali_reg";
interrupts = <0 144 4>;
};

View File

@ -14,6 +14,7 @@ Required properties:
at25df641
at26df081a
en25s64
mr25h128
mr25h256
mr25h10
mr25h40

View File

@ -1,13 +1,16 @@
* Serial NOR flash controller for MTK MT81xx (and similar)
Required properties:
- compatible: The possible values are:
"mediatek,mt2701-nor"
"mediatek,mt7623-nor"
- compatible: For mt8173, compatible should be "mediatek,mt8173-nor",
and it is the fallback compatible for other SoCs.
For every other SoC, should contain both the SoC-specific compatible
string and "mediatek,mt8173-nor".
The possible values are:
"mediatek,mt2701-nor", "mediatek,mt8173-nor"
"mediatek,mt2712-nor", "mediatek,mt8173-nor"
"mediatek,mt7622-nor", "mediatek,mt8173-nor"
"mediatek,mt7623-nor", "mediatek,mt8173-nor"
"mediatek,mt8173-nor"
For mt8173, compatible should be "mediatek,mt8173-nor".
For every other SoC, should contain both the SoC-specific compatible string
and "mediatek,mt8173-nor".
- reg: physical base address and length of the controller's register
- clocks: the phandle of the clocks needed by the nor controller
- clock-names: the names of the clocks

View File

@ -5,9 +5,13 @@ Required properties:
- compatible: Should be set to one of the following:
marvell,pxa3xx-nand
marvell,armada370-nand
marvell,armada-8k-nand
- reg: The register base for the controller
- interrupts: The interrupt to map
- #address-cells: Set to <1> if the node includes partitions
- marvell,system-controller: Set to retrieve the syscon node that handles
NAND controller related registers (only required
with marvell,armada-8k-nand compatible).
Optional properties:

View File

@ -14,7 +14,7 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/nand-gpio.h>
#include <linux/gpio/machine.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
@ -176,6 +176,17 @@ static inline void cmx255_init_nor(void) {}
#endif
#if defined(CONFIG_MTD_NAND_GPIO) || defined(CONFIG_MTD_NAND_GPIO_MODULE)
static struct gpiod_lookup_table cmx255_nand_gpiod_table = {
.dev_id = "gpio-nand",
.table = {
GPIO_LOOKUP("gpio-pxa", GPIO_NAND_CS, "nce", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-pxa", GPIO_NAND_CLE, "cle", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-pxa", GPIO_NAND_ALE, "ale", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-pxa", GPIO_NAND_RB, "rdy", GPIO_ACTIVE_HIGH),
},
};
static struct resource cmx255_nand_resource[] = {
[0] = {
.start = PXA_CS1_PHYS,
@ -198,11 +209,6 @@ static struct mtd_partition cmx255_nand_parts[] = {
};
static struct gpio_nand_platdata cmx255_nand_platdata = {
.gpio_nce = GPIO_NAND_CS,
.gpio_cle = GPIO_NAND_CLE,
.gpio_ale = GPIO_NAND_ALE,
.gpio_rdy = GPIO_NAND_RB,
.gpio_nwp = -1,
.parts = cmx255_nand_parts,
.num_parts = ARRAY_SIZE(cmx255_nand_parts),
.chip_delay = 25,
@ -220,6 +226,7 @@ static struct platform_device cmx255_nand = {
static void __init cmx255_init_nand(void)
{
gpiod_add_lookup_table(&cmx255_nand_gpiod_table);
platform_device_register(&cmx255_nand);
}
#else

View File

@ -10,7 +10,6 @@ config UML
select HAVE_DEBUG_KMEMLEAK
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select GENERIC_IO
select GENERIC_CLOCKEVENTS
select HAVE_GCC_PLUGINS
select TTY # Needed for line.c

View File

@ -1,6 +1,5 @@
menuconfig MTD
tristate "Memory Technology Device (MTD) support"
depends on GENERIC_IO
help
Memory Technology Devices are flash, RAM and similar chips, often
used for solid state file systems on embedded devices. This option

View File

@ -20,8 +20,9 @@ static int mapram_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int mapram_erase (struct mtd_info *, struct erase_info *);
static void mapram_nop (struct mtd_info *);
static struct mtd_info *map_ram_probe(struct map_info *map);
static unsigned long mapram_unmapped_area(struct mtd_info *, unsigned long,
unsigned long, unsigned long);
static int mapram_point (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys);
static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
static struct mtd_chip_driver mapram_chipdrv = {
@ -65,11 +66,12 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
mtd->type = MTD_RAM;
mtd->size = map->size;
mtd->_erase = mapram_erase;
mtd->_get_unmapped_area = mapram_unmapped_area;
mtd->_read = mapram_read;
mtd->_write = mapram_write;
mtd->_panic_write = mapram_write;
mtd->_point = mapram_point;
mtd->_sync = mapram_nop;
mtd->_unpoint = mapram_unpoint;
mtd->flags = MTD_CAP_RAM;
mtd->writesize = 1;
@ -81,19 +83,23 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
return mtd;
}
/*
* Allow NOMMU mmap() to directly map the device (if not NULL)
* - return the address to which the offset maps
* - return -ENOSYS to indicate refusal to do the mapping
*/
static unsigned long mapram_unmapped_area(struct mtd_info *mtd,
unsigned long len,
unsigned long offset,
unsigned long flags)
static int mapram_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
return (unsigned long) map->virt + offset;
if (!map->virt)
return -EINVAL;
*virt = map->virt + from;
if (phys)
*phys = map->phys + from;
*retlen = len;
return 0;
}
static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
return 0;
}
static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)

View File

@ -20,8 +20,10 @@ static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static void maprom_nop (struct mtd_info *);
static struct mtd_info *map_rom_probe(struct map_info *map);
static int maprom_erase (struct mtd_info *mtd, struct erase_info *info);
static unsigned long maprom_unmapped_area(struct mtd_info *, unsigned long,
unsigned long, unsigned long);
static int maprom_point (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys);
static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
static struct mtd_chip_driver maprom_chipdrv = {
.probe = map_rom_probe,
@ -51,7 +53,8 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
mtd->name = map->name;
mtd->type = MTD_ROM;
mtd->size = map->size;
mtd->_get_unmapped_area = maprom_unmapped_area;
mtd->_point = maprom_point;
mtd->_unpoint = maprom_unpoint;
mtd->_read = maprom_read;
mtd->_write = maprom_write;
mtd->_sync = maprom_nop;
@ -66,18 +69,23 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
}
/*
* Allow NOMMU mmap() to directly map the device (if not NULL)
* - return the address to which the offset maps
* - return -ENOSYS to indicate refusal to do the mapping
*/
static unsigned long maprom_unmapped_area(struct mtd_info *mtd,
unsigned long len,
unsigned long offset,
unsigned long flags)
static int maprom_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
return (unsigned long) map->virt + offset;
if (!map->virt)
return -EINVAL;
*virt = map->virt + from;
if (phys)
*phys = map->phys + from;
*retlen = len;
return 0;
}
static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
return 0;
}
static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)

View File

@ -1814,8 +1814,13 @@ static void __init doc_dbg_register(struct mtd_info *floor)
struct dentry *root = floor->dbg.dfs_dir;
struct docg3 *docg3 = floor->priv;
if (IS_ERR_OR_NULL(root))
if (IS_ERR_OR_NULL(root)) {
if (IS_ENABLED(CONFIG_DEBUG_FS) &&
!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
dev_warn(floor->dev.parent,
"CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
return;
}
debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3,
&flashcontrol_fops);

View File

@ -583,7 +583,7 @@ static struct mtd_erase_region_info erase_regions[] = {
}
};
static struct mtd_partition lart_partitions[] = {
static const struct mtd_partition lart_partitions[] = {
/* blob */
{
.name = "blob",

View File

@ -359,6 +359,7 @@ static const struct spi_device_id m25p_ids[] = {
{"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
/* Everspin MRAMs (non-JEDEC) */
{ "mr25h128" }, /* 128 Kib, 40 MHz */
{ "mr25h256" }, /* 256 Kib, 40 MHz */
{ "mr25h10" }, /* 1 Mib, 40 MHz */
{ "mr25h40" }, /* 4 Mib, 40 MHz */

View File

@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/mtdram.h>
@ -69,6 +70,27 @@ static int ram_point(struct mtd_info *mtd, loff_t from, size_t len,
{
*virt = mtd->priv + from;
*retlen = len;
if (phys) {
/* limit retlen to the number of contiguous physical pages */
unsigned long page_ofs = offset_in_page(*virt);
void *addr = *virt - page_ofs;
unsigned long pfn1, pfn0 = vmalloc_to_pfn(addr);
*phys = __pfn_to_phys(pfn0) + page_ofs;
len += page_ofs;
while (len > PAGE_SIZE) {
len -= PAGE_SIZE;
addr += PAGE_SIZE;
pfn0++;
pfn1 = vmalloc_to_pfn(addr);
if (pfn1 != pfn0) {
*retlen = addr - *virt;
break;
}
}
}
return 0;
}
@ -77,19 +99,6 @@ static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
return 0;
}
/*
* Allow NOMMU mmap() to directly map the device (if not NULL)
* - return the address to which the offset maps
* - return -ENOSYS to indicate refusal to do the mapping
*/
static unsigned long ram_get_unmapped_area(struct mtd_info *mtd,
unsigned long len,
unsigned long offset,
unsigned long flags)
{
return (unsigned long) mtd->priv + offset;
}
static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
@ -134,7 +143,6 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
mtd->_erase = ram_erase;
mtd->_point = ram_point;
mtd->_unpoint = ram_unpoint;
mtd->_get_unmapped_area = ram_get_unmapped_area;
mtd->_read = ram_read;
mtd->_write = ram_write;
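
The contiguity walk added to ram_point() above means *retlen can come back shorter than the requested length whenever the caller asks for a physical address. A minimal sketch of how a consumer is expected to honour a short retlen (do_dma() and the surrounding driver are hypothetical; mtd_point()/mtd_unpoint() are the real MTD API):

	size_t retlen;
	void *virt;
	resource_size_t phys;
	int ret;

	ret = mtd_point(mtd, from, len, &retlen, &virt, &phys);
	if (!ret) {
		/* only the first retlen bytes are physically contiguous */
		do_dma(phys, retlen);		/* hypothetical consumer */
		mtd_unpoint(mtd, from, retlen);	/* release what was mapped */
	}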

View File

@ -163,8 +163,9 @@ static int register_device(char *name, unsigned long start, unsigned long length)
}
if (!(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start =
ioremap(start, length))) {
E("slram: ioremap failed\n");
memremap(start, length,
MEMREMAP_WB | MEMREMAP_WT | MEMREMAP_WC))) {
E("slram: memremap failed\n");
return -EIO;
}
((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end =
@ -186,7 +187,7 @@ static int register_device(char *name, unsigned long start, unsigned long length
if (mtd_device_register((*curmtd)->mtdinfo, NULL, 0)) {
E("slram: Failed to register new device\n");
iounmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
memunmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
kfree((*curmtd)->mtdinfo->priv);
kfree((*curmtd)->mtdinfo);
return(-EAGAIN);
@ -206,7 +207,7 @@ static void unregister_devices(void)
while (slram_mtdlist) {
nextitem = slram_mtdlist->next;
mtd_device_unregister(slram_mtdlist->mtdinfo);
iounmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
memunmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
kfree(slram_mtdlist->mtdinfo->priv);
kfree(slram_mtdlist->mtdinfo);
kfree(slram_mtdlist);
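
The ioremap() to memremap() switch above is not just cosmetic: the regions slram drives are ordinary RAM, which may not be mappable through ioremap() on all architectures, and memremap() returns a plain pointer rather than an __iomem one, so regular memcpy() is legal on the result. A minimal sketch of the resulting pattern (buffer and offset names are hypothetical):

	void *base = memremap(start, length,
			      MEMREMAP_WB | MEMREMAP_WT | MEMREMAP_WC);
	if (!base)
		return -EIO;	/* memremap() returns NULL, not an ERR_PTR */

	memcpy(buf, base + offset, count);	/* plain pointer: no memcpy_fromio() */
	memunmap(base);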

View File

@ -61,7 +61,7 @@ static struct map_info flagadm_map = {
.bankwidth = 2,
};
static struct mtd_partition flagadm_parts[] = {
static const struct mtd_partition flagadm_parts[] = {
{
.name = "Bootloader",
.offset = FLASH_PARTITION0_ADDR,

View File

@ -47,7 +47,7 @@ static struct map_info impa7_map[NUM_FLASHBANKS] = {
/*
* MTD partitioning stuff
*/
static struct mtd_partition partitions[] =
static const struct mtd_partition partitions[] =
{
{
.name = "FileSystem",

View File

@ -52,7 +52,7 @@
/* partition_info gives details on the logical partitions that the
* single flash device is split into. If the size is zero we use up to
* the end of the device. */
static struct mtd_partition partition_info[]={
static const struct mtd_partition partition_info[] = {
{
.name = "NetSc520 boot kernel",
.offset = 0,

View File

@ -107,7 +107,7 @@ static struct map_info nettel_amd_map = {
.bankwidth = AMD_BUSWIDTH,
};
static struct mtd_partition nettel_amd_partitions[] = {
static const struct mtd_partition nettel_amd_partitions[] = {
{
.name = "SnapGear BIOS config",
.offset = 0x000e0000,

View File

@ -43,7 +43,6 @@ struct platram_info {
struct device *dev;
struct mtd_info *mtd;
struct map_info map;
struct resource *area;
struct platdata_mtd_ram *pdata;
};
@ -97,16 +96,6 @@ static int platram_remove(struct platform_device *pdev)
platram_setrw(info, PLATRAM_RO);
/* release resources */
if (info->area) {
release_resource(info->area);
kfree(info->area);
}
if (info->map.virt != NULL)
iounmap(info->map.virt);
kfree(info);
return 0;
@ -147,12 +136,11 @@ static int platram_probe(struct platform_device *pdev)
info->pdata = pdata;
/* get the resource for the memory mapping */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "no memory resource specified\n");
err = -ENOENT;
info->map.virt = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(info->map.virt)) {
err = PTR_ERR(info->map.virt);
dev_err(&pdev->dev, "failed to ioremap() region\n");
goto exit_free;
}
@ -167,26 +155,8 @@ static int platram_probe(struct platform_device *pdev)
(char *)pdata->mapname : (char *)pdev->name;
info->map.bankwidth = pdata->bankwidth;
/* register our usage of the memory area */
info->area = request_mem_region(res->start, info->map.size, pdev->name);
if (info->area == NULL) {
dev_err(&pdev->dev, "failed to request memory region\n");
err = -EIO;
goto exit_free;
}
/* remap the memory area */
info->map.virt = ioremap(res->start, info->map.size);
dev_dbg(&pdev->dev, "virt %p, %lu bytes\n", info->map.virt, info->map.size);
if (info->map.virt == NULL) {
dev_err(&pdev->dev, "failed to ioremap() region\n");
err = -EIO;
goto exit_free;
}
simple_map_init(&info->map);
dev_dbg(&pdev->dev, "initialised map, probing for mtd\n");

View File

@ -87,7 +87,7 @@ static DEFINE_SPINLOCK(sbc_gxx_spin);
/* partition_info gives details on the logical partitions that the
* single flash device is split into. If the size is zero we use up to
* the end of the device. */
static struct mtd_partition partition_info[]={
static const struct mtd_partition partition_info[] = {
{ .name = "SBC-GXx flash boot partition",
.offset = 0,
.size = BOOT_PARTITION_SIZE_KiB*1024 },

View File

@ -43,7 +43,7 @@ static struct map_info ts5500_map = {
.phys = WINDOW_ADDR
};
static struct mtd_partition ts5500_partitions[] = {
static const struct mtd_partition ts5500_partitions[] = {
{
.name = "Drive A",
.offset = 0,

View File

@ -49,7 +49,7 @@ static struct mtd_info *uclinux_ram_mtdinfo;
/****************************************************************************/
static struct mtd_partition uclinux_romfs[] = {
static const struct mtd_partition uclinux_romfs[] = {
{ .name = "ROMfs" }
};

View File

@ -643,32 +643,6 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
return err;
}
/*
* try to support NOMMU mmaps on concatenated devices
* - we don't support subdev spanning as we can't guarantee it'll work
*/
static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
unsigned long len,
unsigned long offset,
unsigned long flags)
{
struct mtd_concat *concat = CONCAT(mtd);
int i;
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
if (offset >= subdev->size) {
offset -= subdev->size;
continue;
}
return mtd_get_unmapped_area(subdev, len, offset, flags);
}
return (unsigned long) -ENOSYS;
}
/*
* This function constructs a virtual MTD device by concatenating
* num_devs MTD devices. A pointer to the new device object is
@ -790,7 +764,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
concat->mtd._unlock = concat_unlock;
concat->mtd._suspend = concat_suspend;
concat->mtd._resume = concat_resume;
concat->mtd._get_unmapped_area = concat_get_unmapped_area;
/*
* Combine the erase block size info of the subdevices:

View File

@ -1022,11 +1022,18 @@ EXPORT_SYMBOL_GPL(mtd_unpoint);
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
unsigned long offset, unsigned long flags)
{
if (!mtd->_get_unmapped_area)
return -EOPNOTSUPP;
if (offset >= mtd->size || len > mtd->size - offset)
return -EINVAL;
return mtd->_get_unmapped_area(mtd, len, offset, flags);
size_t retlen;
void *virt;
int ret;
ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
if (ret)
return ret;
if (retlen != len) {
mtd_unpoint(mtd, offset, retlen);
return -ENOSYS;
}
return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
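
With this change, mtd_get_unmapped_area() is a thin wrapper over mtd_point(): a driver only has to supply a point method that maps the whole requested range, and a short retlen makes the wrapper unpoint and refuse with -ENOSYS. A minimal sketch of that contract, modeled on the map_ram/map_rom conversions above (mydev is a hypothetical linearly-mapped device):

	static int mydev_point(struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, void **virt, resource_size_t *phys)
	{
		struct mydev *dev = mtd->priv;	/* hypothetical driver context */

		if (!dev->virt)
			return -EINVAL;		/* refuse: no direct mapping */

		*virt = dev->virt + from;	/* kernel virtual address of the range */
		if (phys)
			*phys = dev->phys + from;
		*retlen = len;			/* entire range is contiguous here */
		return 0;
	}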
@ -1093,6 +1100,39 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
struct mtd_oob_ops *ops)
{
/*
* Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
* ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
* this case.
*/
if (!ops->datbuf)
ops->len = 0;
if (!ops->oobbuf)
ops->ooblen = 0;
if (offs < 0 || offs + ops->len > mtd->size)
return -EINVAL;
if (ops->ooblen) {
u64 maxooblen;
if (ops->ooboffs >= mtd_oobavail(mtd, ops))
return -EINVAL;
maxooblen = ((mtd_div_by_ws(mtd->size, mtd) -
mtd_div_by_ws(offs, mtd)) *
mtd_oobavail(mtd, ops)) - ops->ooboffs;
if (ops->ooblen > maxooblen)
return -EINVAL;
}
return 0;
}
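
A worked example of the maxooblen bound above, under an assumed geometry (2048-byte pages, 64-byte OOB, 256 MiB device, raw OOB mode so mtd_oobavail() returns oobsize):

	/*
	 * offs = 0, ooboffs = 0:
	 *   pages remaining = mtd_div_by_ws(256 MiB) - mtd_div_by_ws(0) = 131072
	 *   maxooblen       = 131072 * 64 - 0 = 8 MiB
	 *
	 * Any ops->ooblen above 8 MiB is rejected with -EINVAL before the
	 * driver's _read_oob()/_write_oob() hook ever runs.
	 */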
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
int ret_code;
@ -1100,6 +1140,10 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
if (!mtd->_read_oob)
return -EOPNOTSUPP;
ret_code = mtd_check_oob_ops(mtd, from, ops);
if (ret_code)
return ret_code;
ledtrig_mtd_activity();
/*
* In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
@ -1119,11 +1163,18 @@ EXPORT_SYMBOL_GPL(mtd_read_oob);
int mtd_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
int ret;
ops->retlen = ops->oobretlen = 0;
if (!mtd->_write_oob)
return -EOPNOTSUPP;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
ret = mtd_check_oob_ops(mtd, to, ops);
if (ret)
return ret;
ledtrig_mtd_activity();
return mtd->_write_oob(mtd, to, ops);
}

View File

@ -101,18 +101,6 @@ static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
return part->parent->_unpoint(part->parent, from + part->offset, len);
}
static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
unsigned long len,
unsigned long offset,
unsigned long flags)
{
struct mtd_part *part = mtd_to_part(mtd);
offset += part->offset;
return part->parent->_get_unmapped_area(part->parent, len, offset,
flags);
}
static int part_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
@ -458,8 +446,6 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
slave->mtd._unpoint = part_unpoint;
}
if (parent->_get_unmapped_area)
slave->mtd._get_unmapped_area = part_get_unmapped_area;
if (parent->_read_oob)
slave->mtd._read_oob = part_read_oob;
if (parent->_write_oob)

View File

@ -50,7 +50,7 @@
* Number of free eraseblocks below which GC can also collect low frag
* blocks.
*/
#define LOW_FRAG_GC_TRESHOLD 5
#define LOW_FRAG_GC_THRESHOLD 5
/*
* Wear level cost amortization. We want to do wear leveling on the background
@ -805,7 +805,7 @@ static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d)
{
int idx, stopat;
if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_TRESHOLD)
if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_THRESHOLD)
stopat = MTDSWAP_LOWFRAG;
else
stopat = MTDSWAP_HIFRAG;

View File

@ -317,8 +317,11 @@ config MTD_NAND_PXA3xx
tristate "NAND support on PXA3xx and Armada 370/XP"
depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU
help
This enables the driver for the NAND flash device found on
PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2).
PXA3xx processors (NFCv1) and also on 32-bit Armada
platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada
platforms (7K, 8K) (NFCv2).
config MTD_NAND_SLC_LPC32XX
tristate "NXP LPC32xx SLC Controller"

View File

@ -59,7 +59,7 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o
obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o
nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_amd.o

View File

@ -41,7 +41,7 @@ static struct mtd_info *ams_delta_mtd = NULL;
* Define partitions for flash devices
*/
static struct mtd_partition partition_info[] = {
static const struct mtd_partition partition_info[] = {
{ .name = "Kernel",
.offset = 0,
.size = 3 * SZ_1M + SZ_512K },

View File

@ -718,8 +718,7 @@ static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
nc->op.addrs[nc->op.naddrs++] = page;
nc->op.addrs[nc->op.naddrs++] = page >> 8;
if ((mtd->writesize > 512 && chip->chipsize > SZ_128M) ||
(mtd->writesize <= 512 && chip->chipsize > SZ_32M))
if (chip->options & NAND_ROW_ADDR_3)
nc->op.addrs[nc->op.naddrs++] = page >> 16;
}
}
@ -2530,6 +2529,9 @@ static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
struct atmel_nand_controller *nc = dev_get_drvdata(dev);
struct atmel_nand *nand;
if (nc->pmecc)
atmel_pmecc_reset(nc->pmecc);
list_for_each_entry(nand, &nc->chips, node) {
int i;
@ -2547,6 +2549,7 @@ static struct platform_driver atmel_nand_controller_driver = {
.driver = {
.name = "atmel-nand-controller",
.of_match_table = of_match_ptr(atmel_nand_controller_of_ids),
.pm = &atmel_nand_controller_pm_ops,
},
.probe = atmel_nand_controller_probe,
.remove = atmel_nand_controller_remove,

View File

@ -765,6 +765,13 @@ void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user,
}
EXPORT_SYMBOL_GPL(atmel_pmecc_get_generated_eccbytes);
void atmel_pmecc_reset(struct atmel_pmecc *pmecc)
{
writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
}
EXPORT_SYMBOL_GPL(atmel_pmecc_reset);
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op)
{
struct atmel_pmecc *pmecc = user->pmecc;
@ -797,10 +804,7 @@ EXPORT_SYMBOL_GPL(atmel_pmecc_enable);
void atmel_pmecc_disable(struct atmel_pmecc_user *user)
{
struct atmel_pmecc *pmecc = user->pmecc;
writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
atmel_pmecc_reset(user->pmecc);
mutex_unlock(&user->pmecc->lock);
}
EXPORT_SYMBOL_GPL(atmel_pmecc_disable);
@ -855,10 +859,7 @@ static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev,
/* Disable all interrupts before registering the PMECC handler. */
writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR);
/* Reset the ECC engine */
writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
atmel_pmecc_reset(pmecc);
return pmecc;
}

View File

@ -61,6 +61,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
struct atmel_pmecc_user_req *req);
void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user);
void atmel_pmecc_reset(struct atmel_pmecc *pmecc);
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op);
void atmel_pmecc_disable(struct atmel_pmecc_user *user);
int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user);

View File

@ -331,8 +331,7 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
ctx->write_byte(mtd, (u8)(page_addr >> 8));
/* One more address cycle for devices > 32MiB */
if (this->chipsize > (32 << 20))
if (this->options & NAND_ROW_ADDR_3)
ctx->write_byte(mtd,
((page_addr >> 16) & 0x0f));
}
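
The docg3 and atmel hunks in this series make the same substitution: per-driver geometry heuristics give way to a flag the core sets once during chip identification. A hedged sketch of the resulting driver-side idiom (the helper is hypothetical; only the flag test comes from these hunks):

	/* Hypothetical helper: emit the row-address cycles for a page access. */
	static void emit_row_address(struct mtd_info *mtd, struct nand_chip *chip,
				     int page_addr)
	{
		chip->write_byte(mtd, page_addr & 0xff);
		chip->write_byte(mtd, (page_addr >> 8) & 0xff);

		/* the core set NAND_ROW_ADDR_3 if a third row cycle is needed;
		 * no more per-driver chipsize/writesize checks */
		if (chip->options & NAND_ROW_ADDR_3)
			chip->write_byte(mtd, (page_addr >> 16) & 0x0f);
	}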

View File

@ -42,7 +42,7 @@ static void __iomem *cmx270_nand_io;
/*
* Define static partitions for flash device
*/
static struct mtd_partition partition_info[] = {
static const struct mtd_partition partition_info[] = {
[0] = {
.name = "cmx270-0",
.offset = 0,

View File

@ -10,20 +10,18 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/mtd/mtd.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "denali.h"
@ -31,9 +29,9 @@ MODULE_LICENSE("GPL");
#define DENALI_NAND_NAME "denali-nand"
/* Host Data/Command Interface */
#define DENALI_HOST_ADDR 0x00
#define DENALI_HOST_DATA 0x10
/* for Indexed Addressing */
#define DENALI_INDEXED_CTRL 0x00
#define DENALI_INDEXED_DATA 0x10
#define DENALI_MAP00 (0 << 26) /* direct access to buffer */
#define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */
@ -61,31 +59,55 @@ MODULE_LICENSE("GPL");
*/
#define DENALI_CLK_X_MULT 6
/*
* this macro allows us to convert from an MTD structure to our own
* device context (denali) structure.
*/
static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}
static void denali_host_write(struct denali_nand_info *denali,
uint32_t addr, uint32_t data)
/*
* Direct Addressing - the slave address forms the control information (command
* type, bank, block, and page address). The slave data is the actual data to
* be transferred. This mode requires 28 bits of address region allocated.
*/
static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
{
iowrite32(addr, denali->host + DENALI_HOST_ADDR);
iowrite32(data, denali->host + DENALI_HOST_DATA);
return ioread32(denali->host + addr);
}
static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
u32 data)
{
iowrite32(data, denali->host + addr);
}
/*
* Indexed Addressing - address translation module intervenes in passing the
* control information. This mode reduces the required address range. The
* control information and transferred data are latched by the registers in
* the translation module.
*/
static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
{
iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
return ioread32(denali->host + DENALI_INDEXED_DATA);
}
static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
u32 data)
{
iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}
/*
* Use the configuration feature register to determine the maximum number of
* banks that the hardware supports.
*/
static void detect_max_banks(struct denali_nand_info *denali)
static void denali_detect_max_banks(struct denali_nand_info *denali)
{
uint32_t features = ioread32(denali->reg + FEATURES);
denali->max_banks = 1 << (features & FEATURES__N_BANKS);
denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
/* the encoding changed from rev 5.0 to 5.1 */
if (denali->revision < 0x0501)
@ -189,7 +211,7 @@ static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
msecs_to_jiffies(1000));
if (!time_left) {
dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
denali->irq_mask);
irq_mask);
return 0;
}
@ -208,73 +230,47 @@ static uint32_t denali_check_irq(struct denali_nand_info *denali)
return irq_status;
}
/*
* This helper function setups the registers for ECC and whether or not
* the spare area will be transferred.
*/
static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
bool transfer_spare)
{
int ecc_en_flag, transfer_spare_flag;
/* set ECC, transfer spare bits if needed */
ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
/* Enable spare area/ECC per user's request. */
iowrite32(ecc_en_flag, denali->reg + ECC_ENABLE);
iowrite32(transfer_spare_flag, denali->reg + TRANSFER_SPARE_REG);
}
static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
int i;
iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
denali->host + DENALI_HOST_ADDR);
for (i = 0; i < len; i++)
buf[i] = ioread32(denali->host + DENALI_HOST_DATA);
buf[i] = denali->host_read(denali, addr);
}
static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
int i;
iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
denali->host + DENALI_HOST_ADDR);
for (i = 0; i < len; i++)
iowrite32(buf[i], denali->host + DENALI_HOST_DATA);
denali->host_write(denali, addr, buf[i]);
}
static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
uint16_t *buf16 = (uint16_t *)buf;
int i;
iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
denali->host + DENALI_HOST_ADDR);
for (i = 0; i < len / 2; i++)
buf16[i] = ioread32(denali->host + DENALI_HOST_DATA);
buf16[i] = denali->host_read(denali, addr);
}
static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
const uint16_t *buf16 = (const uint16_t *)buf;
int i;
iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
denali->host + DENALI_HOST_ADDR);
for (i = 0; i < len / 2; i++)
iowrite32(buf16[i], denali->host + DENALI_HOST_DATA);
denali->host_write(denali, addr, buf16[i]);
}
static uint8_t denali_read_byte(struct mtd_info *mtd)
@ -319,7 +315,7 @@ static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
if (ctrl & NAND_CTRL_CHANGE)
denali_reset_irq(denali);
denali_host_write(denali, DENALI_BANK(denali) | type, dat);
denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}
static int denali_dev_ready(struct mtd_info *mtd)
@ -389,7 +385,7 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
return 0;
}
max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS;
max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);
/*
* The register holds the maximum of per-sector corrected bitflips.
@ -402,13 +398,6 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
return max_bitflips;
}
#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
#define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE)
#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
struct denali_nand_info *denali,
unsigned long *uncor_ecc_flags, uint8_t *buf)
@ -426,18 +415,20 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
do {
err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
err_sector = ECC_SECTOR(err_addr);
err_byte = ECC_BYTE(err_addr);
err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);
err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
err_cor_value = ECC_CORRECTION_VALUE(err_cor_info);
err_device = ECC_ERR_DEVICE(err_cor_info);
err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
err_cor_info);
err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
err_cor_info);
/* reset the bitflip counter when crossing ECC sector */
if (err_sector != prev_sector)
bitflips = 0;
if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) {
if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
/*
* Check later if this is a real ECC error, or
* an erased sector.
@ -467,12 +458,11 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
}
prev_sector = err_sector;
} while (!ECC_LAST_ERR(err_cor_info));
} while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));
/*
* Once handle all ecc errors, controller will trigger a
* ECC_TRANSACTION_DONE interrupt, so here just wait for
* a while for this interrupt
* Once all ECC errors are handled, the controller will trigger an
* ECC_TRANSACTION_DONE interrupt.
*/
irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
@ -481,13 +471,6 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
return max_bitflips;
}
/* programs the controller to either enable/disable DMA transfers */
static void denali_enable_dma(struct denali_nand_info *denali, bool en)
{
iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->reg + DMA_ENABLE);
ioread32(denali->reg + DMA_ENABLE);
}
static void denali_setup_dma64(struct denali_nand_info *denali,
dma_addr_t dma_addr, int page, int write)
{
@ -502,14 +485,14 @@ static void denali_setup_dma64(struct denali_nand_info *denali,
* 1. setup transfer type, interrupt when complete,
* burst len = 64 bytes, the number of pages
*/
denali_host_write(denali, mode,
0x01002000 | (64 << 16) | (write << 8) | page_count);
denali->host_write(denali, mode,
0x01002000 | (64 << 16) | (write << 8) | page_count);
/* 2. set memory low address */
denali_host_write(denali, mode, dma_addr);
denali->host_write(denali, mode, lower_32_bits(dma_addr));
/* 3. set memory high address */
denali_host_write(denali, mode, (uint64_t)dma_addr >> 32);
denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
static void denali_setup_dma32(struct denali_nand_info *denali,
@ -523,32 +506,23 @@ static void denali_setup_dma32(struct denali_nand_info *denali,
/* DMA is a four step process */
/* 1. setup transfer type and # of pages */
denali_host_write(denali, mode | page,
0x2000 | (write << 8) | page_count);
denali->host_write(denali, mode | page,
0x2000 | (write << 8) | page_count);
/* 2. set memory high address bits 23:8 */
denali_host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
/* 3. set memory low address bits 23:8 */
denali_host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
/* 4. interrupt when complete, burst len = 64 bytes */
denali_host_write(denali, mode | 0x14000, 0x2400);
}
static void denali_setup_dma(struct denali_nand_info *denali,
dma_addr_t dma_addr, int page, int write)
{
if (denali->caps & DENALI_CAP_DMA_64BIT)
denali_setup_dma64(denali, dma_addr, page, write);
else
denali_setup_dma32(denali, dma_addr, page, write);
denali->host_write(denali, mode | 0x14000, 0x2400);
}
static int denali_pio_read(struct denali_nand_info *denali, void *buf,
size_t size, int page, int raw)
{
uint32_t addr = DENALI_BANK(denali) | page;
u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
uint32_t *buf32 = (uint32_t *)buf;
uint32_t irq_status, ecc_err_mask;
int i;
@ -560,9 +534,8 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
denali_reset_irq(denali);
iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
for (i = 0; i < size / 4; i++)
*buf32++ = ioread32(denali->host + DENALI_HOST_DATA);
*buf32++ = denali->host_read(denali, addr);
irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
if (!(irq_status & INTR__PAGE_XFER_INC))
@ -577,16 +550,15 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
static int denali_pio_write(struct denali_nand_info *denali,
const void *buf, size_t size, int page, int raw)
{
uint32_t addr = DENALI_BANK(denali) | page;
u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
const uint32_t *buf32 = (uint32_t *)buf;
uint32_t irq_status;
int i;
denali_reset_irq(denali);
iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
for (i = 0; i < size / 4; i++)
iowrite32(*buf32++, denali->host + DENALI_HOST_DATA);
denali->host_write(denali, addr, *buf32++);
irq_status = denali_wait_for_irq(denali,
INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
@ -635,19 +607,19 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
ecc_err_mask = INTR__ECC_ERR;
}
denali_enable_dma(denali, true);
iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
denali_reset_irq(denali);
denali_setup_dma(denali, dma_addr, page, write);
denali->setup_dma(denali, dma_addr, page, write);
/* wait for operation to complete */
irq_status = denali_wait_for_irq(denali, irq_mask);
if (!(irq_status & INTR__DMA_CMD_COMP))
ret = -EIO;
else if (irq_status & ecc_err_mask)
ret = -EBADMSG;
denali_enable_dma(denali, false);
iowrite32(0, denali->reg + DMA_ENABLE);
dma_unmap_single(denali->dev, dma_addr, size, dir);
if (irq_status & INTR__ERASED_PAGE)
@ -659,7 +631,9 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
size_t size, int page, int raw, int write)
{
setup_ecc_for_xfer(denali, !raw, raw);
iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
denali->reg + TRANSFER_SPARE_REG);
if (denali->dma_avail)
return denali_dma_xfer(denali, buf, size, page, raw, write);
@ -970,8 +944,8 @@ static int denali_erase(struct mtd_info *mtd, int page)
denali_reset_irq(denali);
denali_host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
DENALI_ERASE);
denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
DENALI_ERASE);
/* wait for erase to complete or failure to occur */
irq_status = denali_wait_for_irq(denali,
@ -1009,7 +983,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + ACC_CLKS);
tmp &= ~ACC_CLKS__VALUE;
tmp |= acc_clks;
tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
iowrite32(tmp, denali->reg + ACC_CLKS);
/* tRWH -> RE_2_WE */
@ -1018,7 +992,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RE_2_WE);
tmp &= ~RE_2_WE__VALUE;
tmp |= re_2_we;
tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
iowrite32(tmp, denali->reg + RE_2_WE);
/* tRHZ -> RE_2_RE */
@ -1027,16 +1001,22 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RE_2_RE);
tmp &= ~RE_2_RE__VALUE;
tmp |= re_2_re;
tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
iowrite32(tmp, denali->reg + RE_2_RE);
/* tWHR -> WE_2_RE */
we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk);
/*
* tCCS, tWHR -> WE_2_RE
*
* With WE_2_RE properly set, the Denali controller automatically takes
* care of the delay; the driver need not set NAND_WAIT_TCCS.
*/
we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min),
t_clk);
we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);
tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
tmp |= we_2_re;
tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);
/* tADL -> ADDR_2_DATA */
@ -1050,8 +1030,8 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);
tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
tmp &= ~addr_2_data_mask;
tmp |= addr_2_data;
tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);
/* tREH, tWH -> RDWR_EN_HI_CNT */
@ -1061,7 +1041,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
tmp &= ~RDWR_EN_HI_CNT__VALUE;
tmp |= rdwr_en_hi;
tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);
/* tRP, tWP -> RDWR_EN_LO_CNT */
@ -1075,7 +1055,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
tmp &= ~RDWR_EN_LO_CNT__VALUE;
tmp |= rdwr_en_lo;
tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);
/* tCS, tCEA -> CS_SETUP_CNT */
@ -1086,7 +1066,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + CS_SETUP_CNT);
tmp &= ~CS_SETUP_CNT__VALUE;
tmp |= cs_setup;
tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
iowrite32(tmp, denali->reg + CS_SETUP_CNT);
return 0;
@ -1131,15 +1111,11 @@ static void denali_hw_init(struct denali_nand_info *denali)
* if this value is 0, just let it be.
*/
denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
detect_max_banks(denali);
denali_detect_max_banks(denali);
iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
/* Should set value for these registers when init */
iowrite32(0, denali->reg + TWO_ROW_ADDR_CYCLES);
iowrite32(1, denali->reg + ECC_ENABLE);
}
int denali_calc_ecc_bytes(int step_size, int strength)
@ -1211,22 +1187,6 @@ static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
.free = denali_ooblayout_free,
};
/* initialize driver data structures */
static void denali_drv_init(struct denali_nand_info *denali)
{
/*
* the completion object will be used to notify
* the callee that the interrupt is done
*/
init_completion(&denali->complete);
/*
* the spinlock will be used to synchronize the ISR with any
* element that might be access shared data (interrupt status)
*/
spin_lock_init(&denali->irq_lock);
}
static int denali_multidev_fixup(struct denali_nand_info *denali)
{
struct nand_chip *chip = &denali->nand;
@ -1282,15 +1242,17 @@ int denali_init(struct denali_nand_info *denali)
{
struct nand_chip *chip = &denali->nand;
struct mtd_info *mtd = nand_to_mtd(chip);
u32 features = ioread32(denali->reg + FEATURES);
int ret;
mtd->dev.parent = denali->dev;
denali_hw_init(denali);
denali_drv_init(denali);
init_completion(&denali->complete);
spin_lock_init(&denali->irq_lock);
denali_clear_irq_all(denali);
/* Request IRQ after all the hardware initialization is finished */
ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
IRQF_SHARED, DENALI_NAND_NAME, denali);
if (ret) {
@ -1308,7 +1270,6 @@ int denali_init(struct denali_nand_info *denali)
if (!mtd->name)
mtd->name = "denali-nand";
/* register the driver with the NAND core subsystem */
chip->select_chip = denali_select_chip;
chip->read_byte = denali_read_byte;
chip->write_byte = denali_write_byte;
@ -1317,15 +1278,18 @@ int denali_init(struct denali_nand_info *denali)
chip->dev_ready = denali_dev_ready;
chip->waitfunc = denali_waitfunc;
if (features & FEATURES__INDEX_ADDR) {
denali->host_read = denali_indexed_read;
denali->host_write = denali_indexed_write;
} else {
denali->host_read = denali_direct_read;
denali->host_write = denali_direct_write;
}
/* clk rate info is needed for setup_data_interface */
if (denali->clk_x_rate)
chip->setup_data_interface = denali_setup_data_interface;
/*
* scan for NAND devices attached to the controller
* this is the first stage in a two step process to register
* with the nand subsystem
*/
ret = nand_scan_ident(mtd, denali->max_banks, NULL);
if (ret)
goto disable_irq;
@ -1347,20 +1311,15 @@ int denali_init(struct denali_nand_info *denali)
if (denali->dma_avail) {
chip->options |= NAND_USE_BOUNCE_BUFFER;
chip->buf_align = 16;
if (denali->caps & DENALI_CAP_DMA_64BIT)
denali->setup_dma = denali_setup_dma64;
else
denali->setup_dma = denali_setup_dma32;
}
/*
* second stage of the NAND scan
* this stage requires information regarding ECC and
* bad block management.
*/
chip->bbt_options |= NAND_BBT_USE_FLASH;
chip->bbt_options |= NAND_BBT_NO_OOB;
chip->ecc.mode = NAND_ECC_HW_SYNDROME;
/* no subpage writes on denali */
chip->options |= NAND_NO_SUBPAGE_WRITE;
ret = denali_ecc_setup(mtd, chip, denali);
@ -1373,12 +1332,15 @@ int denali_init(struct denali_nand_info *denali)
"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
iowrite32(MAKE_ECC_CORRECTION(chip->ecc.strength, 1),
iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
denali->reg + ECC_CORRECTION);
iowrite32(mtd->erasesize / mtd->writesize,
denali->reg + PAGES_PER_BLOCK);
iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
denali->reg + DEVICE_WIDTH);
iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
denali->reg + TWO_ROW_ADDR_CYCLES);
iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
@ -1441,7 +1403,6 @@ disable_irq:
}
EXPORT_SYMBOL(denali_init);
/* driver exit point */
void denali_remove(struct denali_nand_info *denali)
{
struct mtd_info *mtd = nand_to_mtd(&denali->nand);

View File

@ -10,18 +10,16 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#ifndef __DENALI_H__
#define __DENALI_H__
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/mtd/rawnand.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#define DEVICE_RESET 0x0
#define DEVICE_RESET__BANK(bank) BIT(bank)
@ -111,9 +109,6 @@
#define ECC_CORRECTION 0x1b0
#define ECC_CORRECTION__VALUE GENMASK(4, 0)
#define ECC_CORRECTION__ERASE_THRESHOLD GENMASK(31, 16)
#define MAKE_ECC_CORRECTION(val, thresh) \
(((val) & (ECC_CORRECTION__VALUE)) | \
(((thresh) << 16) & (ECC_CORRECTION__ERASE_THRESHOLD)))
#define READ_MODE 0x1c0
#define READ_MODE__VALUE GENMASK(3, 0)
@ -255,13 +250,13 @@
#define ECC_ERROR_ADDRESS 0x630
#define ECC_ERROR_ADDRESS__OFFSET GENMASK(11, 0)
#define ECC_ERROR_ADDRESS__SECTOR_NR GENMASK(15, 12)
#define ECC_ERROR_ADDRESS__SECTOR GENMASK(15, 12)
#define ERR_CORRECTION_INFO 0x640
#define ERR_CORRECTION_INFO__BYTEMASK GENMASK(7, 0)
#define ERR_CORRECTION_INFO__DEVICE_NR GENMASK(11, 8)
#define ERR_CORRECTION_INFO__ERROR_TYPE BIT(14)
#define ERR_CORRECTION_INFO__LAST_ERR_INFO BIT(15)
#define ERR_CORRECTION_INFO__BYTE GENMASK(7, 0)
#define ERR_CORRECTION_INFO__DEVICE GENMASK(11, 8)
#define ERR_CORRECTION_INFO__UNCOR BIT(14)
#define ERR_CORRECTION_INFO__LAST_ERR BIT(15)
#define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10)
#define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8)
@ -310,23 +305,24 @@ struct denali_nand_info {
struct device *dev;
void __iomem *reg; /* Register Interface */
void __iomem *host; /* Host Data/Command Interface */
/* elements used by ISR */
struct completion complete;
spinlock_t irq_lock;
uint32_t irq_mask;
uint32_t irq_status;
spinlock_t irq_lock; /* protect irq_mask and irq_status */
u32 irq_mask; /* interrupts we are waiting for */
u32 irq_status; /* interrupts that have happened */
int irq;
void *buf;
void *buf; /* for syndrome layout conversion */
dma_addr_t dma_addr;
int dma_avail;
int dma_avail; /* can support DMA? */
int devs_per_cs; /* devices connected in parallel */
int oob_skip_bytes;
int oob_skip_bytes; /* number of bytes reserved for BBM */
int max_banks;
unsigned int revision;
unsigned int caps;
unsigned int revision; /* IP revision */
unsigned int caps; /* IP capability (or quirk) */
const struct nand_ecc_caps *ecc_caps;
u32 (*host_read)(struct denali_nand_info *denali, u32 addr);
void (*host_write)(struct denali_nand_info *denali, u32 addr, u32 data);
void (*setup_dma)(struct denali_nand_info *denali, dma_addr_t dma_addr,
int page, int write);
};
#define DENALI_CAP_HW_ECC_FIXUP BIT(0)
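
Much of the denali churn above is mechanical conversion to the <linux/bitfield.h> helpers, which pack and extract register fields by mask. A self-contained sketch with simplified stand-ins for GENMASK()/FIELD_PREP()/FIELD_GET() (the real kernel macros additionally validate masks at compile time):

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-ins; the real versions live in <linux/bits.h>
	 * and <linux/bitfield.h>. */
	#define GENMASK(h, l)		(((~0u) >> (31 - (h))) & ((~0u) << (l)))
	#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))
	#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

	#define ECC_CORRECTION__VALUE		GENMASK(4, 0)	/* bits 4:0 */
	#define ECC_CORRECTION__ERASE_THRESHOLD	GENMASK(31, 16)	/* bits 31:16 */

	int main(void)
	{
		uint32_t reg = FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
			       FIELD_PREP(ECC_CORRECTION__VALUE, 8);

		/* prints "strength=8 thresh=1" */
		printf("strength=%u thresh=%u\n",
		       FIELD_GET(ECC_CORRECTION__VALUE, reg),
		       FIELD_GET(ECC_CORRECTION__ERASE_THRESHOLD, reg));
		return 0;
	}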

View File

@ -12,15 +12,16 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include "denali.h"
@ -155,7 +156,6 @@ static struct platform_driver denali_dt_driver = {
.of_match_table = denali_nand_dt_ids,
},
};
module_platform_driver(denali_dt_driver);
MODULE_LICENSE("GPL");

View File

@ -11,6 +11,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@ -106,7 +109,6 @@ failed_remap_reg:
return ret;
}
/* driver exit point */
static void denali_pci_remove(struct pci_dev *dev)
{
struct denali_nand_info *denali = pci_get_drvdata(dev);
@ -122,5 +124,4 @@ static struct pci_driver denali_pci_driver = {
.probe = denali_pci_probe,
.remove = denali_pci_remove,
};
module_pci_driver(denali_pci_driver);

View File

@ -705,8 +705,7 @@ static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
if (page_addr != -1) {
WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
/* One more address cycle for higher density devices */
if (this->chipsize & 0x0c000000) {
if (this->options & NAND_ROW_ADDR_3) {
WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
printk("high density\n");
}

View File

@ -23,7 +23,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
@ -31,12 +31,16 @@
#include <linux/mtd/nand-gpio.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
struct gpiomtd {
void __iomem *io_sync;
struct nand_chip nand_chip;
struct gpio_nand_platdata plat;
struct gpio_desc *nce; /* Optional chip enable */
struct gpio_desc *cle;
struct gpio_desc *ale;
struct gpio_desc *rdy;
struct gpio_desc *nwp; /* Optional write protection */
};
static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd)
@ -78,11 +82,10 @@ static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
gpio_nand_dosync(gpiomtd);
if (ctrl & NAND_CTRL_CHANGE) {
if (gpiomtd->nce)
gpiod_set_value(gpiomtd->nce, !(ctrl & NAND_NCE));
gpiod_set_value(gpiomtd->cle, !!(ctrl & NAND_CLE));
gpiod_set_value(gpiomtd->ale, !!(ctrl & NAND_ALE));
gpio_nand_dosync(gpiomtd);
}
if (cmd == NAND_CMD_NONE)
@ -96,7 +99,7 @@ static int gpio_nand_devready(struct mtd_info *mtd)
{
struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
return gpiod_get_value(gpiomtd->rdy);
}
#ifdef CONFIG_OF
@ -123,12 +126,6 @@ static int gpio_nand_get_config_of(const struct device *dev,
}
}
if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
plat->chip_delay = val;
@ -201,10 +198,11 @@ static int gpio_nand_remove(struct platform_device *pdev)
nand_release(nand_to_mtd(&gpiomtd->nand_chip));
/* Enable write protection and disable the chip */
if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
gpiod_set_value(gpiomtd->nwp, 0);
if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
gpiod_set_value(gpiomtd->nce, 0);
return 0;
}
@ -215,66 +213,66 @@ static int gpio_nand_probe(struct platform_device *pdev)
struct nand_chip *chip;
struct mtd_info *mtd;
struct resource *res;
struct device *dev = &pdev->dev;
int ret = 0;
if (!dev->of_node && !dev_get_platdata(dev))
return -EINVAL;
gpiomtd = devm_kzalloc(dev, sizeof(*gpiomtd), GFP_KERNEL);
if (!gpiomtd)
return -ENOMEM;
chip = &gpiomtd->nand_chip;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
chip->IO_ADDR_R = devm_ioremap_resource(dev, res);
if (IS_ERR(chip->IO_ADDR_R))
return PTR_ERR(chip->IO_ADDR_R);
res = gpio_nand_get_io_sync(pdev);
if (res) {
gpiomtd->io_sync = devm_ioremap_resource(dev, res);
if (IS_ERR(gpiomtd->io_sync))
return PTR_ERR(gpiomtd->io_sync);
}
ret = gpio_nand_get_config(dev, &gpiomtd->plat);
if (ret)
return ret;
/* Just enable the chip */
gpiomtd->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH);
if (IS_ERR(gpiomtd->nce))
return PTR_ERR(gpiomtd->nce);
/* We disable write protection once we know probe() will succeed */
gpiomtd->nwp = devm_gpiod_get_optional(dev, "nwp", GPIOD_OUT_LOW);
if (IS_ERR(gpiomtd->nwp)) {
ret = PTR_ERR(gpiomtd->nwp);
goto out_ce;
}
gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
if (IS_ERR(gpiomtd->ale)) {
ret = PTR_ERR(gpiomtd->ale);
goto out_ce;
}
ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_ale, "NAND ALE");
if (ret)
return ret;
gpio_direction_output(gpiomtd->plat.gpio_ale, 0);
gpiomtd->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW);
if (IS_ERR(gpiomtd->cle)) {
ret = PTR_ERR(gpiomtd->cle);
goto out_ce;
}
ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_cle, "NAND CLE");
if (ret)
return ret;
gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_rdy,
"NAND RDY");
if (ret)
return ret;
gpio_direction_input(gpiomtd->plat.gpio_rdy);
gpiomtd->rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN);
if (IS_ERR(gpiomtd->rdy)) {
ret = PTR_ERR(gpiomtd->rdy);
goto out_ce;
}
/* Using RDY pin */
if (gpiomtd->rdy)
chip->dev_ready = gpio_nand_devready;
nand_set_flash_node(chip, pdev->dev.of_node);
chip->IO_ADDR_W = chip->IO_ADDR_R;
@ -285,12 +283,13 @@ static int gpio_nand_probe(struct platform_device *pdev)
chip->cmd_ctrl = gpio_nand_cmd_ctrl;
mtd = nand_to_mtd(chip);
mtd->dev.parent = dev;
platform_set_drvdata(pdev, gpiomtd);
/* Disable write protection, if wired up */
if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
gpiod_direction_output(gpiomtd->nwp, 1);
ret = nand_scan(mtd, 1);
if (ret)
@ -305,8 +304,11 @@ static int gpio_nand_probe(struct platform_device *pdev)
return 0;
err_wp:
if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
gpiod_set_value(gpiomtd->nwp, 0);
out_ce:
if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
gpiod_set_value(gpiomtd->nce, 0);
return ret;
}


@ -432,8 +432,7 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr)
host->addr_value[0] |= (page_addr & 0xffff)
<< (host->addr_cycle * 8);
host->addr_cycle += 2;
if (chip->options & NAND_ROW_ADDR_3) {
host->addr_cycle += 1;
if (host->command == NAND_CMD_ERASE1)
host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;


@ -115,6 +115,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
op = ECC_DECODE;
dec = readw(ecc->regs + ECC_DECDONE);
if (dec & ecc->sectors) {
/*
* Clear decode IRQ status once again to ensure that
* there will be no extra IRQ.
*/
readw(ecc->regs + ECC_DECIRQ_STA);
ecc->sectors = 0;
complete(&ecc->done);
} else {
@ -130,8 +135,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
}
}
return IRQ_HANDLED;
}
@ -307,6 +310,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
/* disable it */
mtk_ecc_wait_idle(ecc, op);
if (op == ECC_DECODE)
/*
* Clear decode IRQ status in case there is a timeout to wait
* decode IRQ.
*/
readw(ecc->regs + ECC_DECIRQ_STA);
writew(0, ecc->regs + ECC_IRQ_REG(op));
writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));


@ -415,7 +415,7 @@ static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
* waits for completion. */
static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
{
pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq);
dev_dbg(host->dev, "send_cmd(host, 0x%x, %d)\n", cmd, useirq);
writew(cmd, NFC_V1_V2_FLASH_CMD);
writew(NFC_CMD, NFC_V1_V2_CONFIG2);
@ -431,7 +431,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
udelay(1);
}
if (max_retries < 0)
pr_debug("%s: RESET failed\n", __func__);
dev_dbg(host->dev, "%s: RESET failed\n", __func__);
} else {
/* Wait for operation to complete */
wait_op_done(host, useirq);
@ -454,7 +454,7 @@ static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
* a NAND command. */
static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
{
pr_debug("send_addr(host, 0x%x %d)\n", addr, islast);
dev_dbg(host->dev, "send_addr(host, 0x%x %d)\n", addr, islast);
writew(addr, NFC_V1_V2_FLASH_ADDR);
writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
@ -607,7 +607,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
uint16_t ecc_status = get_ecc_status_v1(host);
if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
dev_dbg(host->dev, "HWECC uncorrectable 2-bit ECC error\n");
return -EBADMSG;
}
@ -634,7 +634,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
do {
err = ecc_stat & ecc_bit_mask;
if (err > err_limit) {
dev_dbg(host->dev, "UnCorrectable RS-ECC Error\n");
return -EBADMSG;
} else {
ret += err;
@ -642,7 +642,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
ecc_stat >>= 4;
} while (--no_subpages);
pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
dev_dbg(host->dev, "%d Symbol Correctable RS-ECC Error\n", ret);
return ret;
}
@ -673,7 +673,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
host->buf_start++;
}
pr_debug("%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
dev_dbg(host->dev, "%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
return ret;
}
@ -859,8 +859,7 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
host->devtype_data->send_addr(host,
(page_addr >> 8) & 0xff, true);
} else {
if (nand_chip->options & NAND_ROW_ADDR_3) {
/* paddr_8 - paddr_15 */
host->devtype_data->send_addr(host,
(page_addr >> 8) & 0xff,
@ -1212,7 +1211,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
dev_dbg(host->dev, "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
command, column, page_addr);
/* Reset command state information */


@ -115,7 +115,7 @@ static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
if (section || !ecc->total)
return -ERANGE;
oobregion->length = ecc->total;
@ -727,8 +727,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, page_addr, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
if (chip->options & NAND_ROW_ADDR_3)
chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
}
chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
@ -854,8 +853,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, page_addr, ctrl);
chip->cmd_ctrl(mtd, page_addr >> 8,
NAND_NCE | NAND_ALE);
if (chip->options & NAND_ROW_ADDR_3)
chip->cmd_ctrl(mtd, page_addr >> 16,
NAND_NCE | NAND_ALE);
}
@ -1246,6 +1244,7 @@ int nand_reset(struct nand_chip *chip, int chipnr)
return 0;
}
EXPORT_SYMBOL_GPL(nand_reset);
/**
* nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
@ -2799,15 +2798,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf)
{
struct nand_chip *chip = mtd_to_nand(mtd);
int chipnr = (int)(to >> chip->chip_shift);
struct mtd_oob_ops ops;
int ret;
/* Grab the device */
panic_nand_get_device(chip, mtd, FL_WRITING);
chip->select_chip(mtd, chipnr);
/* Wait for the device to get ready */
panic_nand_wait(mtd, chip, 400);
memset(&ops, 0, sizeof(ops));
ops.len = len;
ops.datbuf = (uint8_t *)buf;
@ -3999,6 +4001,9 @@ ident_done:
chip->chip_shift += 32 - 1;
}
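/* more than 2^16 pages means a third row address cycle is needed */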
if (chip->chip_shift - chip->page_shift > 16)
chip->options |= NAND_ROW_ADDR_3;
chip->badblockbits = 8;
chip->erase = single_erase;
@ -4700,6 +4705,19 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
break;
default:
/*
* Expose the whole OOB area to users if ECC_NONE
* is passed. We could do that for all kind of
* ->oobsize, but we must keep the old large/small
* page with ECC layout when ->oobsize <= 128 for
* compatibility reasons.
*/
if (ecc->mode == NAND_ECC_NONE) {
mtd_set_ooblayout(mtd,
&nand_ooblayout_lp_ops);
break;
}
WARN(1, "No oob scheme defined for oobsize %d\n",
mtd->oobsize);
ret = -EINVAL;


@ -520,11 +520,16 @@ static int nandsim_debugfs_create(struct nandsim *dev)
struct dentry *root = nsmtd->dbg.dfs_dir;
struct dentry *dent;
/*
* Just skip debugfs initialization when the debugfs directory is
* missing.
*/
if (IS_ERR_OR_NULL(root)) {
if (IS_ENABLED(CONFIG_DEBUG_FS) &&
!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
return 0;
}
dent = debugfs_create_file("nandsim_wear_report", S_IRUSR,
root, dev, &dfs_fops);


@ -154,7 +154,7 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
if (page_addr != -1) {
write_addr_reg(nand, page_addr);
if (chip->options & NAND_ROW_ADDR_3) {
write_addr_reg(nand, page_addr >> 8);
write_addr_reg(nand, page_addr >> 16 | ENDADDR);
} else {


@ -1133,129 +1133,172 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
0x97, 0x79, 0xe5, 0x24, 0xb5};
/**
* _omap_calculate_ecc_bch - Generate ECC bytes for one sector
* @mtd: MTD device structure
* @dat: The pointer to data on which ecc is computed
* @ecc_code: The ecc_code buffer
* @i: The sector number (for a multi sector page)
*
* Support calculating of BCH4/8/16 ECC vectors for one sector
* within a page. Sector number is in @i.
*/
static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
const u_char *dat, u_char *ecc_calc, int i)
{
struct omap_nand_info *info = mtd_to_omap(mtd);
int eccbytes = info->nand.ecc.bytes;
struct gpmc_nand_regs *gpmc_regs = &info->reg;
u8 *ecc_code;
unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
u32 val;
int j;
ecc_code = ecc_calc;
switch (info->ecc_opt) {
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
case OMAP_ECC_BCH8_CODE_HW:
bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
*ecc_code++ = (bch_val4 & 0xFF);
*ecc_code++ = ((bch_val3 >> 24) & 0xFF);
*ecc_code++ = ((bch_val3 >> 16) & 0xFF);
*ecc_code++ = ((bch_val3 >> 8) & 0xFF);
*ecc_code++ = (bch_val3 & 0xFF);
*ecc_code++ = ((bch_val2 >> 24) & 0xFF);
*ecc_code++ = ((bch_val2 >> 16) & 0xFF);
*ecc_code++ = ((bch_val2 >> 8) & 0xFF);
*ecc_code++ = (bch_val2 & 0xFF);
*ecc_code++ = ((bch_val1 >> 24) & 0xFF);
*ecc_code++ = ((bch_val1 >> 16) & 0xFF);
*ecc_code++ = ((bch_val1 >> 8) & 0xFF);
*ecc_code++ = (bch_val1 & 0xFF);
break;
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
case OMAP_ECC_BCH4_CODE_HW:
bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
*ecc_code++ = ((bch_val2 >> 12) & 0xFF);
*ecc_code++ = ((bch_val2 >> 4) & 0xFF);
*ecc_code++ = ((bch_val2 & 0xF) << 4) |
((bch_val1 >> 28) & 0xF);
*ecc_code++ = ((bch_val1 >> 20) & 0xFF);
*ecc_code++ = ((bch_val1 >> 12) & 0xFF);
*ecc_code++ = ((bch_val1 >> 4) & 0xFF);
*ecc_code++ = ((bch_val1 & 0xF) << 4);
break;
case OMAP_ECC_BCH16_CODE_HW:
val = readl(gpmc_regs->gpmc_bch_result6[i]);
ecc_code[0] = ((val >> 8) & 0xFF);
ecc_code[1] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result5[i]);
ecc_code[2] = ((val >> 24) & 0xFF);
ecc_code[3] = ((val >> 16) & 0xFF);
ecc_code[4] = ((val >> 8) & 0xFF);
ecc_code[5] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result4[i]);
ecc_code[6] = ((val >> 24) & 0xFF);
ecc_code[7] = ((val >> 16) & 0xFF);
ecc_code[8] = ((val >> 8) & 0xFF);
ecc_code[9] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result3[i]);
ecc_code[10] = ((val >> 24) & 0xFF);
ecc_code[11] = ((val >> 16) & 0xFF);
ecc_code[12] = ((val >> 8) & 0xFF);
ecc_code[13] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result2[i]);
ecc_code[14] = ((val >> 24) & 0xFF);
ecc_code[15] = ((val >> 16) & 0xFF);
ecc_code[16] = ((val >> 8) & 0xFF);
ecc_code[17] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result1[i]);
ecc_code[18] = ((val >> 24) & 0xFF);
ecc_code[19] = ((val >> 16) & 0xFF);
ecc_code[20] = ((val >> 8) & 0xFF);
ecc_code[21] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result0[i]);
ecc_code[22] = ((val >> 24) & 0xFF);
ecc_code[23] = ((val >> 16) & 0xFF);
ecc_code[24] = ((val >> 8) & 0xFF);
ecc_code[25] = ((val >> 0) & 0xFF);
break;
default:
return -EINVAL;
}
/* ECC scheme specific syndrome customizations */
switch (info->ecc_opt) {
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
/* Add constant polynomial to remainder, so that
* ECC of blank pages results in 0x0 on reading back
*/
for (j = 0; j < eccbytes; j++)
ecc_calc[j] ^= bch4_polynomial[j];
break;
case OMAP_ECC_BCH4_CODE_HW:
/* Set 8th ECC byte as 0x0 for ROM compatibility */
ecc_calc[eccbytes - 1] = 0x0;
break;
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
/* Add constant polynomial to remainder, so that
* ECC of blank pages results in 0x0 on reading back
*/
for (j = 0; j < eccbytes; j++)
ecc_calc[j] ^= bch8_polynomial[j];
break;
case OMAP_ECC_BCH8_CODE_HW:
/* Set 14th ECC byte as 0x0 for ROM compatibility */
ecc_calc[eccbytes - 1] = 0x0;
break;
case OMAP_ECC_BCH16_CODE_HW:
break;
default:
return -EINVAL;
}
return 0;
}
/**
* omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
* @mtd: MTD device structure
* @dat: The pointer to data on which ecc is computed
* @ecc_code: The ecc_code buffer
*
* Support calculating of BCH4/8/16 ECC vectors for one sector. This is used
* when SW based correction is required as ECC is required for one sector
* at a time.
*/
static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd,
const u_char *dat, u_char *ecc_calc)
{
return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0);
}
/**
* omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
* @mtd: MTD device structure
* @dat: The pointer to data on which ecc is computed
* @ecc_code: The ecc_code buffer
*
* Support calculating of BCH4/8/16 ecc vectors for the entire page in one go.
*/
static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
const u_char *dat, u_char *ecc_calc)
{
struct omap_nand_info *info = mtd_to_omap(mtd);
int eccbytes = info->nand.ecc.bytes;
unsigned long nsectors;
int i, ret;
nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
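/* the GPMC ECC engine holds one result set per sector of the page */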
for (i = 0; i < nsectors; i++) {
ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
if (ret)
return ret;
ecc_calc += eccbytes;
}
return 0;
@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->write_buf(mtd, buf, mtd->writesize);
/* Update ecc vector from GPMC result registers */
omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
@ -1508,6 +1551,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
return 0;
}
/**
* omap_write_subpage_bch - BCH hardware ECC based subpage write
* @mtd: mtd info structure
* @chip: nand chip info structure
* @offset: column address of subpage within the page
* @data_len: data length
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
*
* OMAP optimized subpage write method.
*/
static int omap_write_subpage_bch(struct mtd_info *mtd,
struct nand_chip *chip, u32 offset,
u32 data_len, const u8 *buf,
int oob_required, int page)
{
u8 *ecc_calc = chip->buffers->ecccalc;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
int ecc_steps = chip->ecc.steps;
u32 start_step = offset / ecc_size;
u32 end_step = (offset + data_len - 1) / ecc_size;
int step, ret = 0;
/*
* Write entire page at one go as it would be optimal
* as ECC is calculated by hardware.
* ECC is calculated for all subpages but we choose
* only what we want.
*/
/* Enable GPMC ECC engine */
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
/* Write data */
chip->write_buf(mtd, buf, mtd->writesize);
for (step = 0; step < ecc_steps; step++) {
/* mask ECC of un-touched subpages by padding 0xFF */
if (step < start_step || step > end_step)
memset(ecc_calc, 0xff, ecc_bytes);
else
ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
if (ret)
return ret;
buf += ecc_size;
ecc_calc += ecc_bytes;
}
/* copy calculated ECC for whole page to chip->buffer->oob */
/* this include masked-value(0xFF) for unwritten subpages */
ecc_calc = chip->buffers->ecccalc;
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
return ret;
/* write OOB buffer to NAND device */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
return 0;
}
/**
* omap_read_page_bch - BCH ecc based page read function for entire page
* @mtd: mtd info structure
@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->ecc.total);
/* Calculate ecc bytes */
omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
@ -1588,8 +1697,7 @@ static bool is_elm_present(struct omap_nand_info *info,
return true;
}
static bool omap2_nand_ecc_check(struct omap_nand_info *info,
struct omap_nand_platform_data *pdata)
static bool omap2_nand_ecc_check(struct omap_nand_info *info)
{
bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;
@ -1804,7 +1912,6 @@ static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
static int omap_nand_probe(struct platform_device *pdev)
{
struct omap_nand_info *info;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
int err;
@ -1821,29 +1928,10 @@ static int omap_nand_probe(struct platform_device *pdev)
info->pdev = pdev;
err = omap_get_dt_info(dev, info);
if (err)
return err;
info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
if (!info->ops) {
dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
@ -2002,7 +2090,7 @@ static int omap_nand_probe(struct platform_device *pdev)
goto return_error;
}
if (!omap2_nand_ecc_check(info)) {
err = -EINVAL;
goto return_error;
}
@ -2044,7 +2132,7 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 4;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data;
nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
oobbytes_per_step = nand_chip->ecc.bytes + 1;
@ -2066,9 +2154,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 4;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes;
@ -2087,7 +2175,7 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 8;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data;
nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
oobbytes_per_step = nand_chip->ecc.bytes + 1;
@ -2109,9 +2197,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 8;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes;
@ -2131,9 +2219,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 16;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes;
@ -2167,10 +2255,9 @@ scan_tail:
if (err)
goto return_error;
err = mtd_device_register(mtd, NULL, 0);
if (err)
goto return_error;
platform_set_drvdata(pdev, mtd);


@ -30,6 +30,8 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
#define NAND_STOP_DELAY msecs_to_jiffies(40)
@ -45,6 +47,10 @@
*/
#define INIT_BUFFER_SIZE 2048
/* System control register and bit to enable NAND on some SoCs */
#define GENCONF_SOC_DEVICE_MUX 0x208
#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
/* registers and bit definitions */
#define NDCR (0x00) /* Control register */
#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
@ -174,6 +180,7 @@ enum {
enum pxa3xx_nand_variant {
PXA3XX_NAND_VARIANT_PXA,
PXA3XX_NAND_VARIANT_ARMADA370,
PXA3XX_NAND_VARIANT_ARMADA_8K,
};
struct pxa3xx_nand_host {
@ -425,6 +432,10 @@ static const struct of_device_id pxa3xx_nand_dt_ids[] = {
.compatible = "marvell,armada370-nand",
.data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
},
{
.compatible = "marvell,armada-8k-nand",
.data = (void *)PXA3XX_NAND_VARIANT_ARMADA_8K,
},
{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
@ -825,7 +836,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
info->retcode = ERR_UNCORERR;
if (status & NDSR_CORERR) {
info->retcode = ERR_CORERR;
if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) &&
info->ecc_bch)
info->ecc_err_cnt = NDSR_ERR_CNT(status);
else
@ -888,7 +900,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
nand_writel(info, NDCB0, info->ndcb2);
/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
nand_writel(info, NDCB0, info->ndcb3);
}
@ -1671,7 +1684,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
chip->options |= NAND_BUSWIDTH_16;
/* Device detection must be done with ECC disabled */
if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
nand_writel(info, NDECCCTRL, 0x0);
if (pdata->flash_bbt)
@ -1709,7 +1723,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
* (aka splitted) command handling,
*/
if (mtd->writesize > PAGE_CHUNK_SIZE) {
if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) {
chip->cmdfunc = nand_cmdfunc_extended;
} else {
dev_err(&info->pdev->dev,
@ -1928,6 +1943,24 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
if (!of_id)
return 0;
/*
* Some SoCs like A7k/A8k need to enable manually the NAND
* controller to avoid being bootloader dependent. This is done
* through the use of a single bit in the System Functions registers.
*/
if (pxa3xx_nand_get_variant(pdev) == PXA3XX_NAND_VARIANT_ARMADA_8K) {
struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(
pdev->dev.of_node, "marvell,system-controller");
u32 reg;
if (IS_ERR(sysctrl_base))
return PTR_ERR(sysctrl_base);
regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, &reg);
reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
}
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;


@ -22,6 +22,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>
/* NANDc reg offsets */
#define NAND_FLASH_CMD 0x00
@ -199,6 +200,15 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
*/
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
/* Returns the NAND register physical address */
#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
/* Returns the dma address for reg read buffer */
#define reg_buf_dma_addr(chip, vaddr) \
((chip)->reg_read_dma + \
((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
#define QPIC_PER_CW_CMD_ELEMENTS 32
#define QPIC_PER_CW_CMD_SGL 32
#define QPIC_PER_CW_DATA_SGL 8
@ -221,8 +231,13 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
/*
* This data type corresponds to the BAM transaction which will be used for all
* NAND transfers.
* @bam_ce - the array of BAM command elements
* @cmd_sgl - sgl for NAND BAM command pipe
* @data_sgl - sgl for NAND BAM consumer/producer pipe
* @bam_ce_pos - the index in bam_ce which is available for next sgl
* @bam_ce_start - the index in bam_ce which marks the start position ce
* for current sgl. It will be used for size calculation
* for current sgl
* @cmd_sgl_pos - current index in command sgl.
* @cmd_sgl_start - start index in command sgl.
* @tx_sgl_pos - current index in data sgl for tx.
@ -231,8 +246,11 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
* @rx_sgl_start - start index in data sgl for rx.
*/
struct bam_transaction {
struct bam_cmd_element *bam_ce;
struct scatterlist *cmd_sgl;
struct scatterlist *data_sgl;
u32 bam_ce_pos;
u32 bam_ce_start;
u32 cmd_sgl_pos;
u32 cmd_sgl_start;
u32 tx_sgl_pos;
@ -307,7 +325,8 @@ struct nandc_regs {
* controller
* @dev: parent device
* @base: MMIO base
* @base_dma: physical base address of controller registers
* @base_phys: physical base address of controller registers
* @base_dma: dma base address of controller registers
* @core_clk: controller clock
* @aon_clk: another controller clock
*
@ -340,6 +359,7 @@ struct qcom_nand_controller {
struct device *dev;
void __iomem *base;
phys_addr_t base_phys;
dma_addr_t base_dma;
struct clk *core_clk;
@ -462,7 +482,8 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
bam_txn_size =
sizeof(*bam_txn) + num_cw *
((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
@ -472,6 +493,10 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
bam_txn = bam_txn_buf;
bam_txn_buf += sizeof(*bam_txn);
bam_txn->bam_ce = bam_txn_buf;
bam_txn_buf +=
sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
bam_txn->cmd_sgl = bam_txn_buf;
bam_txn_buf +=
sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
@ -489,6 +514,8 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
if (!nandc->props->is_bam)
return;
bam_txn->bam_ce_pos = 0;
bam_txn->bam_ce_start = 0;
bam_txn->cmd_sgl_pos = 0;
bam_txn->cmd_sgl_start = 0;
bam_txn->tx_sgl_pos = 0;
@ -733,6 +760,66 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
return 0;
}
/*
* Prepares the command descriptor for BAM DMA which will be used for NAND
* register reads and writes. The command descriptor requires the command
* to be formed in command element type so this function uses the command
* element from bam transaction ce array and fills the same with required
* data. A single SGL can contain multiple command elements so
* NAND_BAM_NEXT_SGL will be used for starting the separate SGL
* after the current command element.
*/
static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
int reg_off, const void *vaddr,
int size, unsigned int flags)
{
int bam_ce_size;
int i, ret;
struct bam_cmd_element *bam_ce_buffer;
struct bam_transaction *bam_txn = nandc->bam_txn;
bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
/* fill the command desc */
for (i = 0; i < size; i++) {
if (read)
bam_prep_ce(&bam_ce_buffer[i],
nandc_reg_phys(nandc, reg_off + 4 * i),
BAM_READ_COMMAND,
reg_buf_dma_addr(nandc,
(__le32 *)vaddr + i));
else
bam_prep_ce_le32(&bam_ce_buffer[i],
nandc_reg_phys(nandc, reg_off + 4 * i),
BAM_WRITE_COMMAND,
*((__le32 *)vaddr + i));
}
bam_txn->bam_ce_pos += size;
/* use the separate sgl after this command */
if (flags & NAND_BAM_NEXT_SGL) {
bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
bam_ce_size = (bam_txn->bam_ce_pos -
bam_txn->bam_ce_start) *
sizeof(struct bam_cmd_element);
sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
bam_ce_buffer, bam_ce_size);
bam_txn->cmd_sgl_pos++;
bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
if (flags & NAND_BAM_NWD) {
ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
DMA_PREP_FENCE |
DMA_PREP_CMD);
if (ret)
return ret;
}
}
return 0;
}
/*
* Prepares the data descriptor for BAM DMA which will be used for NAND
* data reads and writes.
@ -851,19 +938,22 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
{
bool flow_control = false;
void *vaddr;
vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
nandc->reg_read_pos += num_regs;
if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
first = dev_cmd_reg_addr(nandc, first);
if (nandc->props->is_bam)
return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
num_regs, flags);
if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
flow_control = true;
return prep_adm_dma_desc(nandc, true, first, vaddr,
num_regs * sizeof(u32), flow_control);
}
/*
@ -880,13 +970,9 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
bool flow_control = false;
struct nandc_regs *regs = nandc->regs;
void *vaddr;
vaddr = offset_to_nandc_reg(regs, first);
if (first == NAND_ERASED_CW_DETECT_CFG) {
if (flags & NAND_ERASED_CW_SET)
vaddr = &regs->erased_cw_detect_cfg_set;
@ -903,10 +989,15 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
if (nandc->props->is_bam)
return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
num_regs, flags);
if (first == NAND_FLASH_CMD)
flow_control = true;
return prep_adm_dma_desc(nandc, false, first, vaddr,
num_regs * sizeof(u32), flow_control);
}
/*
@ -1170,7 +1261,8 @@ static int submit_descs(struct qcom_nand_controller *nandc)
}
if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
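/* descriptors on the BAM command channel must be marked as command descriptors */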
r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
DMA_PREP_CMD);
if (r)
return r;
}
@ -2705,6 +2797,7 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (IS_ERR(nandc->base))
return PTR_ERR(nandc->base);
nandc->base_phys = res->start;
nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
nandc->core_clk = devm_clk_get(dev, "core");


@ -1094,14 +1094,11 @@ MODULE_DEVICE_TABLE(of, of_flctl_match);
static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
{
const struct flctl_soc_config *config;
struct sh_flctl_platform_data *pdata;
config = of_device_get_match_data(dev);
if (!config) {
dev_err(dev, "%s: no OF configuration attached\n", __func__);
return NULL;
}


@ -6,3 +6,11 @@ config MTD_PARSER_TRX
may contain up to 3/4 partitions (depending on the version).
This driver will parse TRX header and report at least two partitions:
kernel and rootfs.
config MTD_SHARPSL_PARTS
tristate "Sharp SL Series NAND flash partition parser"
depends on MTD_NAND_SHARPSL || MTD_NAND_TMIO || COMPILE_TEST
help
This provides the read-only FTL logic necessary to read the partition
table from the NAND flash of Sharp SL Series (Zaurus) and the MTD
partition parser using this code.


@ -1 +1,2 @@
obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o
obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o


@ -0,0 +1,398 @@
/*
* sharpslpart.c - MTD partition parser for NAND flash using the SHARP FTL
* for logical addressing, as used on the PXA models of the SHARP SL Series.
*
* Copyright (C) 2017 Andrea Adami <andrea.adami@gmail.com>
*
* Based on SHARP GPL 2.4 sources:
* http://support.ezaurus.com/developer/source/source_dl.asp
* drivers/mtd/nand/sharp_sl_logical.c
* linux/include/asm-arm/sharp_nand_logical.h
*
* Copyright (C) 2002 SHARP
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sizes.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
/* oob structure */
#define NAND_NOOB_LOGADDR_00 8
#define NAND_NOOB_LOGADDR_01 9
#define NAND_NOOB_LOGADDR_10 10
#define NAND_NOOB_LOGADDR_11 11
#define NAND_NOOB_LOGADDR_20 12
#define NAND_NOOB_LOGADDR_21 13
#define BLOCK_IS_RESERVED 0xffff
#define BLOCK_UNMASK_COMPLEMENT 1
/* factory defaults */
#define SHARPSL_NAND_PARTS 3
#define SHARPSL_FTL_PART_SIZE (7 * SZ_1M)
#define SHARPSL_PARTINFO1_LADDR 0x00060000
#define SHARPSL_PARTINFO2_LADDR 0x00064000
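/* partition magics below are "BOOT", "FSRO" and "FSRW" in ASCII */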
#define BOOT_MAGIC 0x424f4f54
#define FSRO_MAGIC 0x4653524f
#define FSRW_MAGIC 0x46535257
/**
* struct sharpsl_ftl - Sharp FTL Logical Table
* @logmax: number of logical blocks
* @log2phy: the logical-to-physical table
*
* Structure containing the logical-to-physical translation table
* used by the SHARP SL FTL.
*/
struct sharpsl_ftl {
unsigned int logmax;
unsigned int *log2phy;
};
/* verify that the OOB bytes 8 to 15 are free and available for the FTL */
static int sharpsl_nand_check_ooblayout(struct mtd_info *mtd)
{
u8 freebytes = 0;
int section = 0;
while (true) {
struct mtd_oob_region oobfree = { };
int ret, i;
ret = mtd_ooblayout_free(mtd, section++, &oobfree);
if (ret)
break;
if (!oobfree.length || oobfree.offset > 15 ||
(oobfree.offset + oobfree.length) < 8)
continue;
i = oobfree.offset >= 8 ? oobfree.offset : 8;
for (; i < oobfree.offset + oobfree.length && i < 16; i++)
freebytes |= BIT(i - 8);
if (freebytes == 0xff)
return 0;
}
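/* the ECC layout does not leave all of OOB bytes 8-15 free */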
return -ENOTSUPP;
}
static int sharpsl_nand_read_oob(struct mtd_info *mtd, loff_t offs, u8 *buf)
{
struct mtd_oob_ops ops = { };
int ret;
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooblen = mtd->oobsize;
ops.oobbuf = buf;
ret = mtd_read_oob(mtd, offs, &ops);
if (ret != 0 || mtd->oobsize != ops.oobretlen)
return -1;
return 0;
}
/*
* The logical block number assigned to a physical block is stored in the OOB
* of the first page, in 3 16-bit copies with the following layout:
*
* 01234567 89abcdef
* -------- --------
* ECC BB xyxyxy
*
* When reading we check that the first two copies agree.
* In case of error, matching is tried using the following pairs.
* Reserved values 0xffff mean the block is kept for wear leveling.
*
* 01234567 89abcdef
* -------- --------
* ECC BB xyxy oob[8]==oob[10] && oob[9]==oob[11] -> byte0=8 byte1=9
* ECC BB xyxy oob[10]==oob[12] && oob[11]==oob[13] -> byte0=10 byte1=11
* ECC BB xy xy oob[12]==oob[8] && oob[13]==oob[9] -> byte0=12 byte1=13
*/
static int sharpsl_nand_get_logical_num(u8 *oob)
{
u16 us;
int good0, good1;
if (oob[NAND_NOOB_LOGADDR_00] == oob[NAND_NOOB_LOGADDR_10] &&
oob[NAND_NOOB_LOGADDR_01] == oob[NAND_NOOB_LOGADDR_11]) {
good0 = NAND_NOOB_LOGADDR_00;
good1 = NAND_NOOB_LOGADDR_01;
} else if (oob[NAND_NOOB_LOGADDR_10] == oob[NAND_NOOB_LOGADDR_20] &&
oob[NAND_NOOB_LOGADDR_11] == oob[NAND_NOOB_LOGADDR_21]) {
good0 = NAND_NOOB_LOGADDR_10;
good1 = NAND_NOOB_LOGADDR_11;
} else if (oob[NAND_NOOB_LOGADDR_20] == oob[NAND_NOOB_LOGADDR_00] &&
oob[NAND_NOOB_LOGADDR_21] == oob[NAND_NOOB_LOGADDR_01]) {
good0 = NAND_NOOB_LOGADDR_20;
good1 = NAND_NOOB_LOGADDR_21;
} else {
return -EINVAL;
}
us = oob[good0] | oob[good1] << 8;
/* parity check */
if (hweight16(us) & BLOCK_UNMASK_COMPLEMENT)
return -EINVAL;
/* reserved */
if (us == BLOCK_IS_RESERVED)
return BLOCK_IS_RESERVED;
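/* bit 0 is an even-parity bit, bits 10:1 hold the logical block number */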
return (us >> 1) & GENMASK(9, 0);
}
static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
{
unsigned int block_num, log_num, phymax;
loff_t block_adr;
u8 *oob;
int i, ret;
oob = kzalloc(mtd->oobsize, GFP_KERNEL);
if (!oob)
return -ENOMEM;
phymax = mtd_div_by_eb(SHARPSL_FTL_PART_SIZE, mtd);
/* FTL reserves 5% of the blocks + 1 spare */
ftl->logmax = ((phymax * 95) / 100) - 1;
ftl->log2phy = kmalloc_array(ftl->logmax, sizeof(*ftl->log2phy),
GFP_KERNEL);
if (!ftl->log2phy) {
ret = -ENOMEM;
goto exit;
}
/* initialize ftl->log2phy */
for (i = 0; i < ftl->logmax; i++)
ftl->log2phy[i] = UINT_MAX;
/* create physical-logical table */
for (block_num = 0; block_num < phymax; block_num++) {
block_adr = block_num * mtd->erasesize;
if (mtd_block_isbad(mtd, block_adr))
continue;
if (sharpsl_nand_read_oob(mtd, block_adr, oob))
continue;
/* get logical block */
log_num = sharpsl_nand_get_logical_num(oob);
/* cut-off errors and skip the out-of-range values */
if (log_num > 0 && log_num < ftl->logmax) {
if (ftl->log2phy[log_num] == UINT_MAX)
ftl->log2phy[log_num] = block_num;
}
}
pr_info("Sharp SL FTL: %d blocks used (%d logical, %d reserved)\n",
phymax, ftl->logmax, phymax - ftl->logmax);
ret = 0;
exit:
kfree(oob);
return ret;
}
static void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl)
{
kfree(ftl->log2phy);
}
static int sharpsl_nand_read_laddr(struct mtd_info *mtd,
loff_t from,
size_t len,
void *buf,
struct sharpsl_ftl *ftl)
{
unsigned int log_num, final_log_num;
unsigned int block_num;
loff_t block_adr;
loff_t block_ofs;
size_t retlen;
int err;
log_num = mtd_div_by_eb((u32)from, mtd);
final_log_num = mtd_div_by_eb(((u32)from + len - 1), mtd);
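/* reads crossing a logical block boundary are not supported */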
if (len <= 0 || log_num >= ftl->logmax || final_log_num > log_num)
return -EINVAL;
block_num = ftl->log2phy[log_num];
block_adr = block_num * mtd->erasesize;
block_ofs = mtd_mod_by_eb((u32)from, mtd);
err = mtd_read(mtd, block_adr + block_ofs, len, &retlen, buf);
/* Ignore corrected ECC errors */
if (mtd_is_bitflip(err))
err = 0;
if (!err && retlen != len)
err = -EIO;
if (err)
pr_err("sharpslpart: error, read failed at %#llx\n",
block_adr + block_ofs);
return err;
}
/*
* MTD Partition Parser
*
* Sample values read from SL-C860
*
* # cat /proc/mtd
* dev: size erasesize name
* mtd0: 006d0000 00020000 "Filesystem"
* mtd1: 00700000 00004000 "smf"
* mtd2: 03500000 00004000 "root"
* mtd3: 04400000 00004000 "home"
*
* PARTITIONINFO1
* 0x00060000: 00 00 00 00 00 00 70 00 42 4f 4f 54 00 00 00 00 ......p.BOOT....
* 0x00060010: 00 00 70 00 00 00 c0 03 46 53 52 4f 00 00 00 00 ..p.....FSRO....
* 0x00060020: 00 00 c0 03 00 00 00 04 46 53 52 57 00 00 00 00 ........FSRW....
*/
struct sharpsl_nand_partinfo {
__le32 start;
__le32 end;
__be32 magic;
u32 reserved;
};
static int sharpsl_nand_read_partinfo(struct mtd_info *master,
loff_t from,
size_t len,
struct sharpsl_nand_partinfo *buf,
struct sharpsl_ftl *ftl)
{
int ret;
ret = sharpsl_nand_read_laddr(master, from, len, buf, ftl);
if (ret)
return ret;
/* check for magics */
if (be32_to_cpu(buf[0].magic) != BOOT_MAGIC ||
be32_to_cpu(buf[1].magic) != FSRO_MAGIC ||
be32_to_cpu(buf[2].magic) != FSRW_MAGIC) {
pr_err("sharpslpart: magic values mismatch\n");
return -EINVAL;
}
/* fixup for hardcoded value 64 MiB (for older models) */
buf[2].end = cpu_to_le32(master->size);
/* extra sanity check */
if (le32_to_cpu(buf[0].end) <= le32_to_cpu(buf[0].start) ||
le32_to_cpu(buf[1].start) < le32_to_cpu(buf[0].end) ||
le32_to_cpu(buf[1].end) <= le32_to_cpu(buf[1].start) ||
le32_to_cpu(buf[2].start) < le32_to_cpu(buf[1].end) ||
le32_to_cpu(buf[2].end) <= le32_to_cpu(buf[2].start)) {
pr_err("sharpslpart: partition sizes mismatch\n");
return -EINVAL;
}
return 0;
}
static int sharpsl_parse_mtd_partitions(struct mtd_info *master,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
struct sharpsl_ftl ftl;
struct sharpsl_nand_partinfo buf[SHARPSL_NAND_PARTS];
struct mtd_partition *sharpsl_nand_parts;
int err;
/* check that OOB bytes 8 to 15 used by the FTL are actually free */
err = sharpsl_nand_check_ooblayout(master);
if (err)
return err;
/* init logical mgmt (FTL) */
err = sharpsl_nand_init_ftl(master, &ftl);
if (err)
return err;
/* read and validate first partition table */
pr_info("sharpslpart: try reading first partition table\n");
err = sharpsl_nand_read_partinfo(master,
SHARPSL_PARTINFO1_LADDR,
sizeof(buf), buf, &ftl);
if (err) {
/* fallback: read second partition table */
pr_warn("sharpslpart: first partition table is invalid, retry using the second\n");
err = sharpsl_nand_read_partinfo(master,
SHARPSL_PARTINFO2_LADDR,
sizeof(buf), buf, &ftl);
}
/* cleanup logical mgmt (FTL) */
sharpsl_nand_cleanup_ftl(&ftl);
if (err) {
pr_err("sharpslpart: both partition tables are invalid\n");
return err;
}
sharpsl_nand_parts = kzalloc(sizeof(*sharpsl_nand_parts) *
SHARPSL_NAND_PARTS, GFP_KERNEL);
if (!sharpsl_nand_parts)
return -ENOMEM;
/* original names */
sharpsl_nand_parts[0].name = "smf";
sharpsl_nand_parts[0].offset = le32_to_cpu(buf[0].start);
sharpsl_nand_parts[0].size = le32_to_cpu(buf[0].end) -
le32_to_cpu(buf[0].start);
sharpsl_nand_parts[1].name = "root";
sharpsl_nand_parts[1].offset = le32_to_cpu(buf[1].start);
sharpsl_nand_parts[1].size = le32_to_cpu(buf[1].end) -
le32_to_cpu(buf[1].start);
sharpsl_nand_parts[2].name = "home";
sharpsl_nand_parts[2].offset = le32_to_cpu(buf[2].start);
sharpsl_nand_parts[2].size = le32_to_cpu(buf[2].end) -
le32_to_cpu(buf[2].start);
*pparts = sharpsl_nand_parts;
return SHARPSL_NAND_PARTS;
}
static struct mtd_part_parser sharpsl_mtd_parser = {
.parse_fn = sharpsl_parse_mtd_partitions,
.name = "sharpslpart",
};
module_mtd_part_parser(sharpsl_mtd_parser);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andrea Adami <andrea.adami@gmail.com>");
MODULE_DESCRIPTION("MTD partitioning for NAND flash on Sharp SL Series");


@ -50,7 +50,7 @@ config SPI_ATMEL_QUADSPI
config SPI_CADENCE_QUADSPI
tristate "Cadence Quad SPI controller"
depends on OF && (ARM || ARM64 || COMPILE_TEST)
help
Enable support for the Cadence Quad SPI Flash controller.
@ -90,7 +90,7 @@ config SPI_INTEL_SPI
tristate
config SPI_INTEL_SPI_PCI
tristate "Intel PCH/PCU SPI flash PCI driver" if EXPERT
tristate "Intel PCH/PCU SPI flash PCI driver"
depends on X86 && PCI
select SPI_INTEL_SPI
help
@ -106,7 +106,7 @@ config SPI_INTEL_SPI_PCI
will be called intel-spi-pci.
config SPI_INTEL_SPI_PLATFORM
tristate "Intel PCH/PCU SPI flash platform driver" if EXPERT
tristate "Intel PCH/PCU SPI flash platform driver"
depends on X86
select SPI_INTEL_SPI
help


@ -31,6 +31,7 @@
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/timer.h>
@ -38,6 +39,9 @@
#define CQSPI_NAME "cadence-qspi"
#define CQSPI_MAX_CHIPSELECT 16
/* Quirks */
#define CQSPI_NEEDS_WR_DELAY BIT(0)
struct cqspi_st;
struct cqspi_flash_pdata {
@ -75,7 +79,9 @@ struct cqspi_st {
bool is_decoded_cs;
u32 fifo_depth;
u32 fifo_width;
bool rclk_en;
u32 trigger_address;
u32 wr_delay;
struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
};
@ -608,6 +614,15 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor,
reinit_completion(&cqspi->transfer_complete);
writel(CQSPI_REG_INDIRECTWR_START_MASK,
reg_base + CQSPI_REG_INDIRECTWR);
/*
* As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
* Controller programming sequence, couple of cycles of
* QSPI_REF_CLK delay is required for the above bit to
* be internally synchronized by the QSPI module. Provide 5
* cycles of delay.
*/
if (cqspi->wr_delay)
ndelay(cqspi->wr_delay);
while (remaining > 0) {
write_bytes = remaining > page_size ? page_size : remaining;
@ -775,7 +790,7 @@ static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
}
static void cqspi_readdata_capture(struct cqspi_st *cqspi,
const bool bypass,
const unsigned int delay)
{
void __iomem *reg_base = cqspi->iobase;
@ -839,7 +854,8 @@ static void cqspi_configure(struct spi_nor *nor)
cqspi->sclk = sclk;
cqspi_config_baudrate_div(cqspi);
cqspi_delay(nor);
cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
f_pdata->read_delay);
}
if (switch_cs || switch_ck)
@ -1036,6 +1052,8 @@ static int cqspi_of_get_pdata(struct platform_device *pdev)
return -ENXIO;
}
cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");
return 0;
}
@ -1156,6 +1174,7 @@ static int cqspi_probe(struct platform_device *pdev)
struct cqspi_st *cqspi;
struct resource *res;
struct resource *res_ahb;
unsigned long data;
int ret;
int irq;
@ -1206,13 +1225,24 @@ static int cqspi_probe(struct platform_device *pdev)
return -ENXIO;
}
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
return ret;
}
ret = clk_prepare_enable(cqspi->clk);
if (ret) {
dev_err(dev, "Cannot enable QSPI clock.\n");
goto probe_clk_failed;
}
cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
data = (unsigned long)of_device_get_match_data(dev);
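/* five QSPI_REF_CLK cycles, rounded up to whole nanoseconds */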
if (data & CQSPI_NEEDS_WR_DELAY)
cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
cqspi->master_ref_clk_hz);
ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
pdev->name, cqspi);
@ -1233,10 +1263,13 @@ static int cqspi_probe(struct platform_device *pdev)
}
return ret;
probe_setup_failed:
cqspi_controller_enable(cqspi, 0);
probe_irq_failed:
clk_disable_unprepare(cqspi->clk);
probe_clk_failed:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
return ret;
}
@ -1253,6 +1286,9 @@ static int cqspi_remove(struct platform_device *pdev)
clk_disable_unprepare(cqspi->clk);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
@ -1284,7 +1320,14 @@ static const struct dev_pm_ops cqspi__dev_pm_ops = {
#endif
static const struct of_device_id cqspi_dt_ids[] = {
{.compatible = "cdns,qspi-nor",},
{
.compatible = "cdns,qspi-nor",
.data = (void *)0,
},
{
.compatible = "ti,k2g-qspi",
.data = (void *)CQSPI_NEEDS_WR_DELAY,
},
{ /* end of table */ }
};


@ -63,7 +63,10 @@ static void intel_spi_pci_remove(struct pci_dev *pdev)
}
static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
{ },
};
MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);

View File

@ -67,8 +67,6 @@
#define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT)
#define PR_RPE BIT(15)
#define PR_BASE_MASK 0x3fff
/* Offsets are from @ispi->sregs */
#define SSFSTS_CTL 0x00
@ -90,20 +88,35 @@
#define OPMENU0 0x08
#define OPMENU1 0x0c
#define OPTYPE_READ_NO_ADDR 0
#define OPTYPE_WRITE_NO_ADDR 1
#define OPTYPE_READ_WITH_ADDR 2
#define OPTYPE_WRITE_WITH_ADDR 3
/* CPU specifics */
#define BYT_PR 0x74
#define BYT_SSFSTS_CTL 0x90
#define BYT_BCR 0xfc
#define BYT_BCR_WPD BIT(0)
#define BYT_FREG_NUM 5
#define BYT_PR_NUM 5
#define LPT_PR 0x74
#define LPT_SSFSTS_CTL 0x90
#define LPT_FREG_NUM 5
#define LPT_PR_NUM 5
#define BXT_PR 0x84
#define BXT_SSFSTS_CTL 0xa0
#define BXT_FREG_NUM 12
#define BXT_PR_NUM 6
#define LVSCC 0xc4
#define UVSCC 0xc8
#define ERASE_OPCODE_SHIFT 8
#define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
#define ERASE_64K_OPCODE_SHIFT 16
#define ERASE_64K_OPCODE_MASK (0xff << ERASE_64K_OPCODE_SHIFT)
#define INTEL_SPI_TIMEOUT 5000 /* ms */
#define INTEL_SPI_FIFO_SZ 64
@ -117,8 +130,11 @@
* @pregs: Start of protection registers
* @sregs: Start of software sequencer registers
* @nregions: Maximum number of regions
* @pr_num: Maximum number of protected range registers
* @writeable: Is the chip writeable
* @swseq: Use SW sequencer in register reads/writes
* @locked: Is SPI setting locked
* @swseq_reg: Use SW sequencer in register reads/writes
* @swseq_erase: Use SW sequencer in erase operation
* @erase_64k: 64k erase supported
* @opcodes: Opcodes which are supported. These are programmed by BIOS
* before it locks down the controller.
@@ -132,8 +148,11 @@ struct intel_spi {
void __iomem *pregs;
void __iomem *sregs;
size_t nregions;
size_t pr_num;
bool writeable;
bool swseq;
bool locked;
bool swseq_reg;
bool swseq_erase;
bool erase_64k;
u8 opcodes[8];
u8 preopcodes[2];
@@ -167,7 +186,7 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
for (i = 0; i < ispi->nregions; i++)
dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
readl(ispi->base + FREG(i)));
for (i = 0; i < PR_NUM; i++)
for (i = 0; i < ispi->pr_num; i++)
dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
readl(ispi->pregs + PR(i)));
@@ -181,8 +200,11 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
if (ispi->info->type == INTEL_SPI_BYT)
dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));
dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));
dev_dbg(ispi->dev, "Protected regions:\n");
for (i = 0; i < PR_NUM; i++) {
for (i = 0; i < ispi->pr_num; i++) {
u32 base, limit;
value = readl(ispi->pregs + PR(i));
@@ -214,7 +236,9 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
}
dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
ispi->swseq ? 'S' : 'H');
ispi->swseq_reg ? 'S' : 'H');
dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n",
ispi->swseq_erase ? 'S' : 'H');
}
/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
@@ -278,7 +302,7 @@ static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
static int intel_spi_init(struct intel_spi *ispi)
{
u32 opmenu0, opmenu1, val;
u32 opmenu0, opmenu1, lvscc, uvscc, val;
int i;
switch (ispi->info->type) {
@@ -286,6 +310,8 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
ispi->pregs = ispi->base + BYT_PR;
ispi->nregions = BYT_FREG_NUM;
ispi->pr_num = BYT_PR_NUM;
ispi->swseq_reg = true;
if (writeable) {
/* Disable write protection */
@@ -305,12 +331,15 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
ispi->pregs = ispi->base + LPT_PR;
ispi->nregions = LPT_FREG_NUM;
ispi->pr_num = LPT_PR_NUM;
ispi->swseq_reg = true;
break;
case INTEL_SPI_BXT:
ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
ispi->pregs = ispi->base + BXT_PR;
ispi->nregions = BXT_FREG_NUM;
ispi->pr_num = BXT_PR_NUM;
ispi->erase_64k = true;
break;
@@ -318,42 +347,64 @@ static int intel_spi_init(struct intel_spi *ispi)
return -EINVAL;
}
/* Disable #SMI generation */
/* Disable #SMI generation from HW sequencer */
val = readl(ispi->base + HSFSTS_CTL);
val &= ~HSFSTS_CTL_FSMIE;
writel(val, ispi->base + HSFSTS_CTL);
/*
* BIOS programs allowed opcodes and then locks down the register.
* So read back what opcodes it decided to support. That's the set
* we are going to support as well.
* Determine whether erase operation should use HW or SW sequencer.
*
* The HW sequencer has a predefined list of opcodes, with only the
* erase opcode being programmable in LVSCC and UVSCC registers.
* If these registers don't contain a valid erase opcode, erase
* cannot be done using HW sequencer.
*/
opmenu0 = readl(ispi->sregs + OPMENU0);
opmenu1 = readl(ispi->sregs + OPMENU1);
lvscc = readl(ispi->base + LVSCC);
uvscc = readl(ispi->base + UVSCC);
if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
ispi->swseq_erase = true;
/* SPI controller on Intel BXT supports 64K erase opcode */
if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
!(uvscc & ERASE_64K_OPCODE_MASK))
ispi->erase_64k = false;
/*
* Some controllers can only do basic operations using hardware
* sequencer. All other operations are supposed to be carried out
* using software sequencer. If we find that BIOS has programmed
* opcodes for the software sequencer we use that over the hardware
* sequencer.
* using software sequencer.
*/
if (opmenu0 && opmenu1) {
for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
ispi->opcodes[i] = opmenu0 >> i * 8;
ispi->opcodes[i + 4] = opmenu1 >> i * 8;
}
val = readl(ispi->sregs + PREOP_OPTYPE);
ispi->preopcodes[0] = val;
ispi->preopcodes[1] = val >> 8;
if (ispi->swseq_reg) {
/* Disable #SMI generation from SW sequencer */
val = readl(ispi->sregs + SSFSTS_CTL);
val &= ~SSFSTS_CTL_FSMIE;
writel(val, ispi->sregs + SSFSTS_CTL);
}
ispi->swseq = true;
/* Check controller's lock status */
val = readl(ispi->base + HSFSTS_CTL);
ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);
if (ispi->locked) {
/*
* BIOS programs allowed opcodes and then locks down the
* register. So read back what opcodes it decided to support.
* That's the set we are going to support as well.
*/
opmenu0 = readl(ispi->sregs + OPMENU0);
opmenu1 = readl(ispi->sregs + OPMENU1);
if (opmenu0 && opmenu1) {
for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
ispi->opcodes[i] = opmenu0 >> i * 8;
ispi->opcodes[i + 4] = opmenu1 >> i * 8;
}
val = readl(ispi->sregs + PREOP_OPTYPE);
ispi->preopcodes[0] = val;
ispi->preopcodes[1] = val >> 8;
}
}
intel_spi_dump_regs(ispi);
@@ -361,18 +412,28 @@ static int intel_spi_init(struct intel_spi *ispi)
return 0;
}
static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode)
static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
{
int i;
int preop;
for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
if (ispi->opcodes[i] == opcode)
return i;
return -EINVAL;
if (ispi->locked) {
for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
if (ispi->opcodes[i] == opcode)
return i;
return -EINVAL;
}
/* The lock is off, so just use index 0 */
writel(opcode, ispi->sregs + OPMENU0);
preop = readw(ispi->sregs + PREOP_OPTYPE);
writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE);
return 0;
}
static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
int len)
static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len)
{
u32 val, status;
int ret;
@@ -394,6 +455,9 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
return -EINVAL;
}
if (len > INTEL_SPI_FIFO_SZ)
return -EINVAL;
val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
val |= HSFSTS_CTL_FGO;
@@ -412,27 +476,39 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
return 0;
}
static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
int len)
static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
int optype)
{
u32 val, status;
u32 val = 0, status;
u16 preop;
int ret;
ret = intel_spi_opcode_index(ispi, opcode);
ret = intel_spi_opcode_index(ispi, opcode, optype);
if (ret < 0)
return ret;
val = (len << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
if (len > INTEL_SPI_FIFO_SZ)
return -EINVAL;
/* Only mark 'Data Cycle' bit when there is data to be transferred */
if (len > 0)
val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
val |= ret << SSFSTS_CTL_COP_SHIFT;
val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
val |= SSFSTS_CTL_SCGO;
preop = readw(ispi->sregs + PREOP_OPTYPE);
if (preop) {
val |= SSFSTS_CTL_ACS;
if (preop >> 8)
val |= SSFSTS_CTL_SPOP;
}
writel(val, ispi->sregs + SSFSTS_CTL);
ret = intel_spi_wait_sw_busy(ispi);
if (ret)
return ret;
status = readl(ispi->base + SSFSTS_CTL);
status = readl(ispi->sregs + SSFSTS_CTL);
if (status & SSFSTS_CTL_FCERR)
return -EIO;
else if (status & SSFSTS_CTL_AEL)
@@ -449,10 +525,11 @@ static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
/* Address of the first chip */
writel(0, ispi->base + FADDR);
if (ispi->swseq)
ret = intel_spi_sw_cycle(ispi, opcode, buf, len);
if (ispi->swseq_reg)
ret = intel_spi_sw_cycle(ispi, opcode, len,
OPTYPE_READ_NO_ADDR);
else
ret = intel_spi_hw_cycle(ispi, opcode, buf, len);
ret = intel_spi_hw_cycle(ispi, opcode, len);
if (ret)
return ret;
@@ -467,10 +544,15 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
/*
* This is handled with atomic operation and preop code in Intel
* controller so skip it here now.
* controller so skip it here now. If the controller is not locked,
* program the opcode to the PREOP register for later use.
*/
if (opcode == SPINOR_OP_WREN)
if (opcode == SPINOR_OP_WREN) {
if (!ispi->locked)
writel(opcode, ispi->sregs + PREOP_OPTYPE);
return 0;
}
writel(0, ispi->base + FADDR);
@@ -479,9 +561,10 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
if (ret)
return ret;
if (ispi->swseq)
return intel_spi_sw_cycle(ispi, opcode, buf, len);
return intel_spi_hw_cycle(ispi, opcode, buf, len);
if (ispi->swseq_reg)
return intel_spi_sw_cycle(ispi, opcode, len,
OPTYPE_WRITE_NO_ADDR);
return intel_spi_hw_cycle(ispi, opcode, len);
}
static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
@@ -561,12 +644,6 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
val |= HSFSTS_CTL_FCYCLE_WRITE;
/* Write enable */
if (ispi->preopcodes[1] == SPINOR_OP_WREN)
val |= SSFSTS_CTL_SPOP;
val |= SSFSTS_CTL_ACS;
writel(val, ispi->base + HSFSTS_CTL);
ret = intel_spi_write_block(ispi, write_buf, block_size);
if (ret) {
dev_err(ispi->dev, "failed to write block\n");
@@ -574,8 +651,8 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
}
/* Start the write now */
val = readl(ispi->base + HSFSTS_CTL);
writel(val | HSFSTS_CTL_FGO, ispi->base + HSFSTS_CTL);
val |= HSFSTS_CTL_FGO;
writel(val, ispi->base + HSFSTS_CTL);
ret = intel_spi_wait_hw_busy(ispi);
if (ret) {
@@ -620,6 +697,22 @@ static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
erase_size = SZ_4K;
}
if (ispi->swseq_erase) {
while (len > 0) {
writel(offs, ispi->base + FADDR);
ret = intel_spi_sw_cycle(ispi, nor->erase_opcode,
0, OPTYPE_WRITE_WITH_ADDR);
if (ret)
return ret;
offs += erase_size;
len -= erase_size;
}
return 0;
}
while (len > 0) {
writel(offs, ispi->base + FADDR);
@@ -652,7 +745,7 @@ static bool intel_spi_is_protected(const struct intel_spi *ispi,
{
int i;
for (i = 0; i < PR_NUM; i++) {
for (i = 0; i < ispi->pr_num; i++) {
u32 pr_base, pr_limit, pr_value;
pr_value = readl(ispi->pregs + PR(i));

View File

@@ -404,6 +404,29 @@ static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
return ret;
}
static void mt8173_nor_disable_clk(struct mt8173_nor *mt8173_nor)
{
clk_disable_unprepare(mt8173_nor->spi_clk);
clk_disable_unprepare(mt8173_nor->nor_clk);
}
static int mt8173_nor_enable_clk(struct mt8173_nor *mt8173_nor)
{
int ret;
ret = clk_prepare_enable(mt8173_nor->spi_clk);
if (ret)
return ret;
ret = clk_prepare_enable(mt8173_nor->nor_clk);
if (ret) {
clk_disable_unprepare(mt8173_nor->spi_clk);
return ret;
}
return 0;
}
static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
struct device_node *flash_node)
{
@@ -468,15 +491,11 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
return PTR_ERR(mt8173_nor->nor_clk);
mt8173_nor->dev = &pdev->dev;
ret = clk_prepare_enable(mt8173_nor->spi_clk);
ret = mt8173_nor_enable_clk(mt8173_nor);
if (ret)
return ret;
ret = clk_prepare_enable(mt8173_nor->nor_clk);
if (ret) {
clk_disable_unprepare(mt8173_nor->spi_clk);
return ret;
}
/* only support one attached flash */
flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
if (!flash_np) {
@@ -487,10 +506,9 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
ret = mtk_nor_init(mt8173_nor, flash_np);
nor_free:
if (ret) {
clk_disable_unprepare(mt8173_nor->spi_clk);
clk_disable_unprepare(mt8173_nor->nor_clk);
}
if (ret)
mt8173_nor_disable_clk(mt8173_nor);
return ret;
}
@@ -498,11 +516,38 @@ static int mtk_nor_drv_remove(struct platform_device *pdev)
{
struct mt8173_nor *mt8173_nor = platform_get_drvdata(pdev);
clk_disable_unprepare(mt8173_nor->spi_clk);
clk_disable_unprepare(mt8173_nor->nor_clk);
mt8173_nor_disable_clk(mt8173_nor);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int mtk_nor_suspend(struct device *dev)
{
struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
mt8173_nor_disable_clk(mt8173_nor);
return 0;
}
static int mtk_nor_resume(struct device *dev)
{
struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
return mt8173_nor_enable_clk(mt8173_nor);
}
static const struct dev_pm_ops mtk_nor_dev_pm_ops = {
.suspend = mtk_nor_suspend,
.resume = mtk_nor_resume,
};
#define MTK_NOR_DEV_PM_OPS (&mtk_nor_dev_pm_ops)
#else
#define MTK_NOR_DEV_PM_OPS NULL
#endif
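The #ifdef block above open-codes the ops table. The PM core also offers a shorthand that expands to the same suspend/resume wiring and degrades gracefully when CONFIG_PM_SLEEP is off; a sketch, not what the patch uses (the callbacks would then be marked __maybe_unused instead of living under the #ifdef):

#include <linux/pm.h>

/* Declares a dev_pm_ops with the system-sleep hooks filled in (or left
 * empty without CONFIG_PM_SLEEP), so .pm can always point at it. */
static SIMPLE_DEV_PM_OPS(mtk_nor_dev_pm_ops, mtk_nor_suspend, mtk_nor_resume);

With that form the MTK_NOR_DEV_PM_OPS wrapper macro would not be needed.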
static const struct of_device_id mtk_nor_of_ids[] = {
{ .compatible = "mediatek,mt8173-nor"},
{ /* sentinel */ }
@@ -514,6 +559,7 @@ static struct platform_driver mtk_nor_driver = {
.remove = mtk_nor_drv_remove,
.driver = {
.name = "mtk-nor",
.pm = MTK_NOR_DEV_PM_OPS,
.of_match_table = mtk_nor_of_ids,
},
};

View File

@@ -89,6 +89,8 @@ struct flash_info {
#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
#define USE_CLSR BIT(14) /* use CLSR command */
int (*quad_enable)(struct spi_nor *nor);
};
#define JEDEC_MFR(info) ((info)->id[0])
@@ -870,6 +872,8 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
static int macronix_quad_enable(struct spi_nor *nor);
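This forward declaration lets the GigaDevice GD25Q256 entry below reuse the Macronix hook: both parts keep the QE bit at bit 6 of the Status Register. A rough sketch of what the helper does (the real body lives further down in spi-nor.c and adds a read-back verification step):

static int macronix_quad_enable_sketch(struct spi_nor *nor)
{
	int val = read_sr(nor);			/* current Status Register */

	if (val < 0)
		return val;
	if (val & SR_QUAD_EN_MX)		/* QE already set */
		return 0;

	write_enable(nor);
	write_sr(nor, val | SR_QUAD_EN_MX);
	return spi_nor_wait_till_ready(nor);
}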
/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
.id = { \
@@ -964,6 +968,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },
/* Everspin */
{ "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
@@ -982,6 +987,11 @@ static const struct flash_info spi_nor_ids[] = {
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
{
"gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
{
"gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
@@ -997,6 +1007,12 @@ static const struct flash_info spi_nor_ids[] = {
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
{
"gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
.quad_enable = macronix_quad_enable,
},
/* Intel/Numonyx -- xxxs33b */
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
@@ -1024,7 +1040,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
@@ -1137,6 +1153,11 @@ static const struct flash_info spi_nor_ids[] = {
{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
{ "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
{
"w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
{ "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
@@ -2288,8 +2309,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
/* Check the SFDP header version. */
if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
header.major != SFDP_JESD216_MAJOR ||
header.minor < SFDP_JESD216_MINOR)
header.major != SFDP_JESD216_MAJOR)
return -EINVAL;
/*
@@ -2427,6 +2447,15 @@ static int spi_nor_init_params(struct spi_nor *nor,
params->quad_enable = spansion_quad_enable;
break;
}
/*
* Some manufacturers, such as GigaDevice, may use a different
* bit to set QE on different memories, so the MFR alone can't
* indicate the quad_enable method in such cases; set it in the
* flash info list instead.
*/
if (info->quad_enable)
params->quad_enable = info->quad_enable;
}
/* Override the parameters with data read from SFDP tables. */
@@ -2630,17 +2659,60 @@ static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info,
/* Enable Quad I/O if needed. */
enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 ||
spi_nor_get_protocol_width(nor->write_proto) == 4);
if (enable_quad_io && params->quad_enable) {
err = params->quad_enable(nor);
if (enable_quad_io && params->quad_enable)
nor->quad_enable = params->quad_enable;
else
nor->quad_enable = NULL;
return 0;
}
static int spi_nor_init(struct spi_nor *nor)
{
int err;
/*
* Atmel, SST, Intel/Numonyx, and other serial NOR chips tend to power
* up with the software protection bits set
*/
if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
nor->info->flags & SPI_NOR_HAS_LOCK) {
write_enable(nor);
write_sr(nor, 0);
spi_nor_wait_till_ready(nor);
}
if (nor->quad_enable) {
err = nor->quad_enable(nor);
if (err) {
dev_err(nor->dev, "quad mode not supported\n");
return err;
}
}
if ((nor->addr_width == 4) &&
(JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
!(nor->info->flags & SPI_NOR_4B_OPCODES))
set_4byte(nor, nor->info, 1);
return 0;
}
/* mtd resume handler */
static void spi_nor_resume(struct mtd_info *mtd)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
struct device *dev = nor->dev;
int ret;
/* re-initialize the nor chip */
ret = spi_nor_init(nor);
if (ret)
dev_err(dev, "resume() failed\n");
}
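mtd->_resume is wired up in spi_nor_scan() below, so the MTD core reaches spi_nor_init() on wakeup through the generic wrapper and restores the protection bits, quad mode and 4-byte addressing that the flash lost over power-down. A sketch of a host driver's resume path; the drvdata layout is hypothetical:

#include <linux/device.h>
#include <linux/mtd/mtd.h>

static int example_host_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);	/* hypothetical */

	mtd_resume(mtd);	/* ends up in spi_nor_resume() above */
	return 0;
}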
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps)
{
@@ -2708,20 +2780,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (ret)
return ret;
/*
* Atmel, SST, Intel/Numonyx, and other serial NOR chips tend to power
* up with the software protection bits set
*/
if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
JEDEC_MFR(info) == SNOR_MFR_INTEL ||
JEDEC_MFR(info) == SNOR_MFR_SST ||
info->flags & SPI_NOR_HAS_LOCK) {
write_enable(nor);
write_sr(nor, 0);
spi_nor_wait_till_ready(nor);
}
if (!mtd->name)
mtd->name = dev_name(dev);
mtd->priv = nor;
@@ -2731,6 +2789,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
mtd->size = params.size;
mtd->_erase = spi_nor_erase;
mtd->_read = spi_nor_read;
mtd->_resume = spi_nor_resume;
/* NOR protection support for STmicro/Micron chips and similar */
if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
@@ -2804,8 +2863,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
info->flags & SPI_NOR_4B_OPCODES)
spi_nor_set_4byte_opcodes(nor, info);
else
set_4byte(nor, info, 1);
} else {
nor->addr_width = 3;
}
@@ -2822,6 +2879,12 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
return ret;
}
/* Send all the required SPI flash commands to initialize the device */
nor->info = info;
ret = spi_nor_init(nor);
if (ret)
return ret;
dev_info(dev, "%s (%lld Kbytes)\n", info->name,
(long long)mtd->size >> 10);

View File

@@ -1,9 +1,22 @@
/*
* stm32_quadspi.c
* Driver for stm32 quadspi controller
*
* Copyright (C) 2017, Ludovic Barre
* Copyright (C) 2017, STMicroelectronics - All Rights Reserved
* Author(s): Ludovic Barre <ludovic.barre@st.com>.
*
* License terms: GNU General Public License (GPL), version 2
* License terms: GPL V2.0.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
#include <linux/errno.h>
@@ -113,6 +126,7 @@
#define STM32_MAX_MMAP_SZ SZ_256M
#define STM32_MAX_NORCHIP 2
#define STM32_QSPI_FIFO_SZ 32
#define STM32_QSPI_FIFO_TIMEOUT_US 30000
#define STM32_QSPI_BUSY_TIMEOUT_US 100000
@@ -124,6 +138,7 @@ struct stm32_qspi_flash {
u32 presc;
u32 read_mode;
bool registered;
u32 prefetch_limit;
};
struct stm32_qspi {
@@ -240,12 +255,12 @@ static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
STM32_QSPI_FIFO_TIMEOUT_US);
if (ret) {
dev_err(qspi->dev, "fifo timeout (stat:%#x)\n", sr);
break;
return ret;
}
tx_fifo(buf++, qspi->io_base + QUADSPI_DR);
}
return ret;
return 0;
}
static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
@@ -272,6 +287,7 @@ static int stm32_qspi_send(struct stm32_qspi_flash *flash,
{
struct stm32_qspi *qspi = flash->qspi;
u32 ccr, dcr, cr;
u32 last_byte;
int err;
err = stm32_qspi_wait_nobusy(qspi);
@@ -314,6 +330,10 @@ static int stm32_qspi_send(struct stm32_qspi_flash *flash,
if (err)
goto abort;
writel_relaxed(FCR_CTCF, qspi->io_base + QUADSPI_FCR);
} else {
last_byte = cmd->addr + cmd->len;
if (last_byte > flash->prefetch_limit)
goto abort;
}
return err;
@@ -322,7 +342,9 @@ abort:
cr = readl_relaxed(qspi->io_base + QUADSPI_CR) | CR_ABORT;
writel_relaxed(cr, qspi->io_base + QUADSPI_CR);
dev_err(qspi->dev, "%s abort err:%d\n", __func__, err);
if (err)
dev_err(qspi->dev, "%s abort err:%d\n", __func__, err);
return err;
}
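The new last_byte check exists because a memory-mapped read near the top of the flash lets the controller prefetch past the end of the device; prefetch_limit is set below to mtd->size - STM32_QSPI_FIFO_SZ, and an access that crosses it takes the abort path with err still 0, so the abort stays silent by design. A worked example with illustrative numbers:

/* e.g. a 16 MiB part with the 32-byte FIFO above:
 *	prefetch_limit = 16 * 1024 * 1024 - STM32_QSPI_FIFO_SZ = 16777184
 * A memory-mapped access with cmd->addr + cmd->len > 16777184 is aborted
 * without logging an error, thanks to the if (err) guard above. */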
@@ -550,6 +572,7 @@ static int stm32_qspi_flash_setup(struct stm32_qspi *qspi,
}
flash->fsize = FSIZE_VAL(mtd->size);
flash->prefetch_limit = mtd->size - STM32_QSPI_FIFO_SZ;
flash->read_mode = CCR_FMODE_MM;
if (mtd->size > qspi->mm_size)

View File

@@ -267,7 +267,7 @@ struct mtd_info {
*/
unsigned int bitflip_threshold;
// Kernel-only stuff starts here.
/* Kernel-only stuff starts here. */
const char *name;
int index;
@@ -297,10 +297,6 @@ struct mtd_info {
int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys);
int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
unsigned long (*_get_unmapped_area) (struct mtd_info *mtd,
unsigned long len,
unsigned long offset,
unsigned long flags);
int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf);
int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
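Dropping the per-driver hook is what "unconfuses" get_unmapped_area: the core now derives it from the point/unpoint pair. A sketch of that pair's contract through the generic wrappers; example_peek is an illustrative caller, not kernel code:

#include <linux/mtd/mtd.h>

static int example_peek(struct mtd_info *mtd, size_t len)
{
	size_t retlen;
	void *virt;
	resource_size_t phys;
	int ret;

	/* map len bytes at offset 0; virt is directly addressable on success */
	ret = mtd_point(mtd, 0, len, &retlen, &virt, &phys);
	if (ret)
		return ret;
	/* ... read from virt[0..retlen) ... */
	return mtd_unpoint(mtd, 0, retlen);
}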

View File

@@ -5,11 +5,6 @@
#include <linux/mtd/rawnand.h>
struct gpio_nand_platdata {
int gpio_nce;
int gpio_nwp;
int gpio_cle;
int gpio_ale;
int gpio_rdy;
void (*adjust_parts)(struct gpio_nand_platdata *, size_t);
struct mtd_partition *parts;
unsigned int num_parts;
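With the integer GPIO numbers gone from the platform data, the driver looks the pins up as descriptors instead. A sketch of the lookup inside probe(), under assumed connection IDs; the real gpio-nand conversion may use different names and flags:

#include <linux/gpio/consumer.h>

	/* "nce" is an illustrative con_id, resolved via DT or board tables */
	struct gpio_desc *nce = devm_gpiod_get_optional(dev, "nce",
							GPIOD_OUT_HIGH);

	if (IS_ERR(nce))
		return PTR_ERR(nce);
	gpiod_set_value(nce, 0);	/* polarity handled by the descriptor */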

View File

@@ -177,6 +177,9 @@ enum nand_ecc_algo {
*/
#define NAND_NEED_SCRAMBLING 0x00002000
/* Device needs 3rd row address cycle */
#define NAND_ROW_ADDR_3 0x00004000
/* Options valid for Samsung large page devices */
#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
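A sketch of the intended consumer of NAND_ROW_ADDR_3, modelled on how nand_base emits row-address cycles; ctrl-flag handling is abbreviated here:

	/* row address: two cycles normally, a third when the chip asks for it */
	chip->cmd_ctrl(mtd, page_addr & 0xff, NAND_NCE | NAND_ALE);
	chip->cmd_ctrl(mtd, (page_addr >> 8) & 0xff, NAND_NCE | NAND_ALE);
	if (chip->options & NAND_ROW_ADDR_3)
		chip->cmd_ctrl(mtd, (page_addr >> 16) & 0xff,
			       NAND_NCE | NAND_ALE);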

View File

@@ -231,11 +231,18 @@ enum spi_nor_option_flags {
SNOR_F_USE_CLSR = BIT(5),
};
/**
* struct flash_info - Forward declaration of a structure used internally by
* spi_nor_scan()
*/
struct flash_info;
/**
* struct spi_nor - Structure for defining the SPI NOR layer
* @mtd: points to an mtd_info structure
* @lock: the lock for the read/write/erase/lock/unlock operations
* @dev: points to a SPI device or a SPI NOR controller device
* @info: SPI NOR part JEDEC MFR ID and other info
* @page_size: the page size of the SPI NOR
* @addr_width: number of address bytes
* @erase_opcode: the opcode for erasing a sector
@@ -262,6 +269,7 @@ enum spi_nor_option_flags {
* @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR
* @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR
* @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is
* completely locked
* @quad_enable: [FLASH-SPECIFIC] enables SPI NOR quad mode
* @priv: the private data
*/
@@ -269,6 +277,7 @@ struct spi_nor {
struct mtd_info mtd;
struct mutex lock;
struct device *dev;
const struct flash_info *info;
u32 page_size;
u8 addr_width;
u8 erase_opcode;
@@ -296,6 +305,7 @@ struct spi_nor {
int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
int (*quad_enable)(struct spi_nor *nor);
void *priv;
};

View File

@@ -64,21 +64,4 @@ struct gpmc_nand_regs {
void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER];
void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER];
};
struct omap_nand_platform_data {
int cs;
struct mtd_partition *parts;
int nr_parts;
bool flash_bbt;
enum nand_io xfer_type;
int devsize;
enum omap_ecc ecc_opt;
struct device_node *elm_of_node;
/* deprecated */
struct gpmc_nand_regs reg;
struct device_node *of_node;
bool dev_ready;
};
#endif

View File

@@ -46,10 +46,6 @@ config GENERIC_IOMAP
bool
select GENERIC_PCI_IOMAP
config GENERIC_IO
bool
default n
config STMP_DEVICE
bool