

Merge tag 'spi-nor/for-4.20' of git://git.infradead.org/linux-mtd into mtd/next

Core changes:
* Support non-uniform erase size
* Support controllers with limited TX fifo size (see the sketch below)

Driver changes:
* m25p80: Re-issue a WREN command after each write access
* cadence: Pass a proper dir value to dma_[un]map_single()
* fsl-qspi: Check the fsl_qspi_get_seqid() return value to make sure 4B
  addressing opcodes are properly handled
* intel-spi: Add a new PCI entry for Ice Lake
Boris Brezillon 2018-10-19 09:16:55 +02:00
commit 5cc1b66e63
6 changed files with 1001 additions and 86 deletions
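As a companion to the "limited TX fifo size" core change listed above: controller drivers shrink the data phase of a spi-mem operation before it is executed, via the adjust_op_size() hook that spi_mem_adjust_op_size() dispatches to. Below is a minimal sketch of such a hook, not code from this series; the my_qspi structure, its fifo_size field and the clamping policy are assumptions for illustration, while the spi-mem types and helpers are the regular spi-mem API.

#include <linux/kernel.h>
#include <linux/spi/spi-mem.h>

/* Hypothetical controller state; only the FIFO depth matters here. */
struct my_qspi {
        unsigned int fifo_size;         /* TX FIFO depth in bytes */
};

/*
 * Called by spi_mem_adjust_op_size() before an op is executed: clamp
 * the data phase so that opcode + address + dummy + data fit in the
 * TX FIFO. The caller then issues the shortened transfer and the
 * upper layers (e.g. the spi-nor core) loop over the remainder.
 */
static int my_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
        struct my_qspi *q = spi_controller_get_devdata(mem->spi->controller);
        unsigned int hdr = 1 /* opcode */ + op->addr.nbytes + op->dummy.nbytes;

        if (op->data.dir != SPI_MEM_DATA_OUT)
                return 0;

        if (q->fifo_size <= hdr)
                return -EINVAL;

        op->data.nbytes = min(op->data.nbytes, q->fifo_size - hdr);

        return 0;
}

static const struct spi_controller_mem_ops my_qspi_mem_ops = {
        .adjust_op_size = my_qspi_adjust_op_size,
        /* .supports_op and .exec_op omitted from this sketch */
};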


@@ -70,7 +70,6 @@ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
                            SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
                            SPI_MEM_OP_NO_DUMMY,
                            SPI_MEM_OP_DATA_OUT(len, buf, 1));
-        size_t remaining = len;
         int ret;
 
         /* get transfer protocols. */
@@ -81,22 +80,16 @@ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
         if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
                 op.addr.nbytes = 0;
 
-        while (remaining) {
-                op.data.nbytes = remaining < UINT_MAX ? remaining : UINT_MAX;
-                ret = spi_mem_adjust_op_size(flash->spimem, &op);
-                if (ret)
-                        return ret;
+        ret = spi_mem_adjust_op_size(flash->spimem, &op);
+        if (ret)
+                return ret;
+        op.data.nbytes = len < op.data.nbytes ? len : op.data.nbytes;
 
-                ret = spi_mem_exec_op(flash->spimem, &op);
-                if (ret)
-                        return ret;
+        ret = spi_mem_exec_op(flash->spimem, &op);
+        if (ret)
+                return ret;
 
-                op.addr.val += op.data.nbytes;
-                remaining -= op.data.nbytes;
-                op.data.buf.out += op.data.nbytes;
-        }
-
-        return len;
+        return op.data.nbytes;
 }
 
 /*
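With the internal loop gone, m25p80_write() above performs one (possibly clamped) program operation and reports how many bytes were actually written, so the WREN issued before each call covers exactly one program cycle and the caller is expected to retry with the remainder. A minimal sketch of that kind of retry loop, assuming a hypothetical write_chunk() primitive (not a real kernel function) rather than the actual spi-nor core code:

#include <linux/errno.h>
#include <linux/mtd/spi-nor.h>
#include <linux/types.h>

/* Hypothetical short-write primitive, in the spirit of ->write() above. */
ssize_t write_chunk(struct spi_nor *nor, loff_t to, size_t len,
                    const u_char *buf);

/* Keep calling the primitive until every byte has been written. */
static int write_all(struct spi_nor *nor, loff_t to, size_t len,
                     const u_char *buf)
{
        while (len) {
                ssize_t written = write_chunk(nor, to, len, buf);

                if (written < 0)
                        return written;         /* propagate the error */
                if (!written)
                        return -EIO;            /* avoid spinning forever */

                to += written;
                buf += written;
                len -= written;
        }

        return 0;
}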


@@ -959,7 +959,7 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
                 return 0;
         }
 
-        dma_dst = dma_map_single(nor->dev, buf, len, DMA_DEV_TO_MEM);
+        dma_dst = dma_map_single(nor->dev, buf, len, DMA_FROM_DEVICE);
         if (dma_mapping_error(nor->dev, dma_dst)) {
                 dev_err(nor->dev, "dma mapping failed\n");
                 return -ENOMEM;
@@ -994,7 +994,7 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
         }
 
 err_unmap:
-        dma_unmap_single(nor->dev, dma_dst, len, DMA_DEV_TO_MEM);
+        dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE);
 
         return 0;
 }
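The two-line fix above is about picking the right enum: DMA_DEV_TO_MEM belongs to enum dma_transfer_direction (the dmaengine API), whereas dma_map_single() and dma_unmap_single() take an enum dma_data_direction, and a buffer the controller writes into must be mapped with DMA_FROM_DEVICE. A generic sketch of that streaming-DMA pattern follows; start_hw_transfer() is a made-up stand-in for the driver's own DMA kick-off, everything else is the regular DMA-mapping API.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper that programs the hardware and waits for completion. */
int start_hw_transfer(struct device *dev, dma_addr_t dst, size_t len);

/* Map a receive buffer, let the device fill it, then unmap it. */
static int dma_read_into(struct device *dev, void *buf, size_t len)
{
        dma_addr_t dma_dst;
        int ret;

        /* The device writes into memory, so the direction is DMA_FROM_DEVICE. */
        dma_dst = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, dma_dst))
                return -ENOMEM;

        ret = start_hw_transfer(dev, dma_dst, len);

        /* Unmap with the same direction that was used for the mapping. */
        dma_unmap_single(dev, dma_dst, len, DMA_FROM_DEVICE);

        return ret;
}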


@@ -478,6 +478,7 @@ static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
 {
         switch (cmd) {
         case SPINOR_OP_READ_1_1_4:
+        case SPINOR_OP_READ_1_1_4_4B:
                 return SEQID_READ;
         case SPINOR_OP_WREN:
                 return SEQID_WREN;
@@ -543,6 +544,9 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len)
 
         /* trigger the LUT now */
         seqid = fsl_qspi_get_seqid(q, cmd);
+        if (seqid < 0)
+                return seqid;
+
         qspi_writel(q, (seqid << QUADSPI_IPCR_SEQID_SHIFT) | len,
                     base + QUADSPI_IPCR);
 
@@ -671,7 +675,7 @@ static void fsl_qspi_set_map_addr(struct fsl_qspi *q)
  * causes the controller to clear the buffer, and use the sequence pointed
  * by the QUADSPI_BFGENCR[SEQID] to initiate a read from the flash.
  */
-static void fsl_qspi_init_ahb_read(struct fsl_qspi *q)
+static int fsl_qspi_init_ahb_read(struct fsl_qspi *q)
 {
         void __iomem *base = q->iobase;
         int seqid;
@@ -696,8 +700,13 @@ static void fsl_qspi_init_ahb_read(struct fsl_qspi *q)
 
         /* Set the default lut sequence for AHB Read. */
         seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode);
+        if (seqid < 0)
+                return seqid;
+
         qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT,
                     q->iobase + QUADSPI_BFGENCR);
+
+        return 0;
 }
 
 /* This function was used to prepare and enable QSPI clock */
@@ -805,9 +814,7 @@ static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
         fsl_qspi_init_lut(q);
 
         /* Init for AHB read */
-        fsl_qspi_init_ahb_read(q);
-
-        return 0;
+        return fsl_qspi_init_ahb_read(q);
 }
 
 static const struct of_device_id fsl_qspi_dt_ids[] = {


@@ -65,6 +65,7 @@ static void intel_spi_pci_remove(struct pci_dev *pdev)
 static const struct pci_device_id intel_spi_pci_ids[] = {
         { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
         { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
+        { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
         { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
         { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
         { },

File diff suppressed because it is too large.


@@ -238,6 +238,94 @@ enum spi_nor_option_flags {
SNOR_F_BROKEN_RESET = BIT(6),
};
/**
* struct spi_nor_erase_type - Structure to describe a SPI NOR erase type
* @size: the size of the sector/block erased by the erase type.
* JEDEC JESD216B imposes erase sizes to be a power of 2.
* @size_shift: @size is a power of 2, the shift is stored in
* @size_shift.
* @size_mask: the size mask based on @size_shift.
* @opcode: the SPI command op code to erase the sector/block.
* @idx: Erase Type index as sorted in the Basic Flash Parameter
* Table. It will be used to synchronize the supported
* Erase Types with the ones identified in the SFDP
* optional tables.
*/
struct spi_nor_erase_type {
u32 size;
u32 size_shift;
u32 size_mask;
u8 opcode;
u8 idx;
};
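/*
 * Illustrative sketch, not part of this patch: because JESD216B erase
 * sizes are powers of two, @size_shift and @size_mask can be derived
 * from @size roughly as below (ffs() comes from <linux/bitops.h>; the
 * helper name is made up for illustration).
 */
static inline void sketch_fill_erase_type(struct spi_nor_erase_type *erase,
					  u8 opcode, u32 size)
{
	erase->opcode = opcode;
	erase->size = size;
	erase->size_shift = ffs(size) - 1;			/* log2(size) */
	erase->size_mask = (1 << erase->size_shift) - 1;	/* size - 1   */
}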
/**
* struct spi_nor_erase_command - Used for non-uniform erases
* The structure is used to describe a list of erase commands to be executed
* once we validate that the erase can be performed. The elements in the list
* are run-length encoded.
* @list: for inclusion into the list of erase commands.
* @count: how many times the same erase command should be
* consecutively used.
* @size: the size of the sector/block erased by the command.
* @opcode: the SPI command op code to erase the sector/block.
*/
struct spi_nor_erase_command {
struct list_head list;
u32 count;
u32 size;
u8 opcode;
};
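/*
 * Illustrative sketch, not part of this patch: thanks to the
 * run-length encoding, erasing three consecutive 64 KiB sectors needs
 * a single list entry. The helper name and the values are made up.
 */
static inline void sketch_queue_three_sector_erases(struct list_head *erase_list,
						    struct spi_nor_erase_command *cmd)
{
	cmd->opcode = SPINOR_OP_SE;	/* 0xd8, typically a 64 KiB erase */
	cmd->size = 64 * 1024;		/* size erased by one command     */
	cmd->count = 3;			/* repeat the command three times */
	list_add_tail(&cmd->list, erase_list);
}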
/**
* struct spi_nor_erase_region - Structure to describe a SPI NOR erase region
* @offset: the offset in the data array of erase region start.
* LSB bits are used as a bitmask encoding flags to
* determine if this region is overlaid, if this region is
* the last in the SPI NOR flash memory and to indicate
* all the supported erase commands inside this region.
* The erase types are sorted in ascending order with the
* smallest Erase Type size being at BIT(0).
* @size: the size of the region in bytes.
*/
struct spi_nor_erase_region {
u64 offset;
u64 size;
};
#define SNOR_ERASE_TYPE_MAX 4
#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0)
#define SNOR_LAST_REGION BIT(4)
#define SNOR_OVERLAID_REGION BIT(5)
#define SNOR_ERASE_FLAGS_MAX 6
#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0)
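/*
 * Illustrative sketch, not part of this patch: the upper bits of
 * @offset carry the region's start address while the low
 * SNOR_ERASE_FLAGS_MAX bits carry the flags above, so both can be
 * recovered as below. The helper names are made up for illustration.
 */
static inline u64 sketch_region_start(const struct spi_nor_erase_region *region)
{
	return region->offset & ~SNOR_ERASE_FLAGS_MASK;
}

static inline bool
sketch_region_has_erase_type(const struct spi_nor_erase_region *region, u8 idx)
{
	/* BIT(0)..BIT(3) flag the erase types usable inside this region. */
	return !!(region->offset & BIT(idx));
}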
/**
* struct spi_nor_erase_map - Structure to describe the SPI NOR erase map
* @regions: array of erase regions. The regions are consecutive in
* address space. Walking through the regions is done
* incrementally.
* @uniform_region: a pre-allocated erase region for SPI NOR with a uniform
* sector size (legacy implementation).
* @erase_type: an array of erase types shared by all the regions.
* The erase types are sorted in ascending order, with the
* smallest Erase Type size being the first member in the
* erase_type array.
* @uniform_erase_type: bitmask encoding erase types that can erase the
* entire memory. This member is completed at init by
* uniform and non-uniform SPI NOR flash memories if they
* support at least one erase type that can erase the
* entire memory.
*/
struct spi_nor_erase_map {
struct spi_nor_erase_region *regions;
struct spi_nor_erase_region uniform_region;
struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX];
u8 uniform_erase_type;
};
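/*
 * Illustrative sketch, not part of this patch: a flash with a single
 * uniform sector size can be described by one region covering the
 * whole device. The helper name is made up; @erase_mask selects which
 * entries of @erase_type apply to that region.
 */
static inline void sketch_init_uniform_map(struct spi_nor_erase_map *map,
					   u8 erase_mask, u64 flash_size)
{
	/* Start address 0, supported erase types and the "last region" flag. */
	map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
				     SNOR_LAST_REGION;
	map->uniform_region.size = flash_size;
	map->regions = &map->uniform_region;
	map->uniform_erase_type = erase_mask;
}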
/**
* struct flash_info - Forward declaration of a structure used internally by
* spi_nor_scan()
@@ -262,6 +350,7 @@ struct flash_info;
* @write_proto: the SPI protocol for write operations
* @reg_proto the SPI protocol for read_reg/write_reg/erase operations
* @cmd_buf: used by the write_reg
* @erase_map: the erase map of the SPI NOR
* @prepare: [OPTIONAL] do some preparations for the
* read/write/erase/lock/unlock operations
* @unprepare: [OPTIONAL] do some post work after the
@@ -297,6 +386,7 @@ struct spi_nor {
bool sst_write_second;
u32 flags;
u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
struct spi_nor_erase_map erase_map;
int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
@@ -317,6 +407,35 @@ struct spi_nor {
void *priv;
};
static u64 __maybe_unused
spi_nor_region_is_last(const struct spi_nor_erase_region *region)
{
return region->offset & SNOR_LAST_REGION;
}
static u64 __maybe_unused
spi_nor_region_end(const struct spi_nor_erase_region *region)
{
return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
}
static void __maybe_unused
spi_nor_region_mark_end(struct spi_nor_erase_region *region)
{
region->offset |= SNOR_LAST_REGION;
}
static void __maybe_unused
spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
{
region->offset |= SNOR_OVERLAID_REGION;
}
static bool __maybe_unused spi_nor_has_uniform_erase(const struct spi_nor *nor)
{
return !!nor->erase_map.uniform_erase_type;
}
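/*
 * Illustrative sketch, not part of this patch: since regions are
 * consecutive in address space, the helpers above are enough to walk
 * the map, e.g. to compute the address right past the last region.
 * The function name is made up for illustration.
 */
static inline u64 sketch_erase_map_end(const struct spi_nor_erase_map *map)
{
	const struct spi_nor_erase_region *region = map->regions;

	while (!spi_nor_region_is_last(region))
		region++;

	return spi_nor_region_end(region);
}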
static inline void spi_nor_set_flash_node(struct spi_nor *nor,
struct device_node *np)
{