From df6bd6c002a4dee73d274a87ef1c0e36f21d2583 Mon Sep 17 00:00:00 2001 From: Ludovic Barre Date: Tue, 7 May 2019 11:16:38 +0200 Subject: [PATCH 01/60] mtd: spi-nor: stm32: remove the driver as it was replaced by spi-stm32-qspi.c There's a new driver using the SPI memory interface of the SPI framework at spi/spi-stm32-qspi.c, which can be used together with m25p80.c to replace the functionality of this SPI NOR driver. The "new" driver uses the same dt properties and not affects the legacy compatibility. Signed-off-by: Ludovic Barre Signed-off-by: Tudor Ambarus --- .../devicetree/bindings/mtd/stm32-quadspi.txt | 43 -- drivers/mtd/spi-nor/Kconfig | 7 - drivers/mtd/spi-nor/Makefile | 1 - drivers/mtd/spi-nor/stm32-quadspi.c | 720 ------------------ 4 files changed, 771 deletions(-) delete mode 100644 Documentation/devicetree/bindings/mtd/stm32-quadspi.txt delete mode 100644 drivers/mtd/spi-nor/stm32-quadspi.c diff --git a/Documentation/devicetree/bindings/mtd/stm32-quadspi.txt b/Documentation/devicetree/bindings/mtd/stm32-quadspi.txt deleted file mode 100644 index ddd18c135148..000000000000 --- a/Documentation/devicetree/bindings/mtd/stm32-quadspi.txt +++ /dev/null @@ -1,43 +0,0 @@ -* STMicroelectronics Quad Serial Peripheral Interface(QuadSPI) - -Required properties: -- compatible: should be "st,stm32f469-qspi" -- reg: the first contains the register location and length. - the second contains the memory mapping address and length -- reg-names: should contain the reg names "qspi" "qspi_mm" -- interrupts: should contain the interrupt for the device -- clocks: the phandle of the clock needed by the QSPI controller -- A pinctrl must be defined to set pins in mode of operation for QSPI transfer - -Optional properties: -- resets: must contain the phandle to the reset controller. - -A spi flash must be a child of the nor_flash node and could have some -properties. Also see jedec,spi-nor.txt. - -Required properties: -- reg: chip-Select number (QSPI controller may connect 2 nor flashes) -- spi-max-frequency: max frequency of spi bus - -Optional property: -- spi-rx-bus-width: see ../spi/spi-bus.txt for the description - -Example: - -qspi: spi@a0001000 { - compatible = "st,stm32f469-qspi"; - reg = <0xa0001000 0x1000>, <0x90000000 0x10000000>; - reg-names = "qspi", "qspi_mm"; - interrupts = <91>; - resets = <&rcc STM32F4_AHB3_RESET(QSPI)>; - clocks = <&rcc 0 STM32F4_AHB3_CLOCK(QSPI)>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_qspi0>; - - flash@0 { - reg = <0>; - spi-rx-bus-width = <4>; - spi-max-frequency = <108000000>; - ... - }; -}; diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig index dab986691267..54b5da049277 100644 --- a/drivers/mtd/spi-nor/Kconfig +++ b/drivers/mtd/spi-nor/Kconfig @@ -104,11 +104,4 @@ config SPI_INTEL_SPI_PLATFORM To compile this driver as a module, choose M here: the module will be called intel-spi-platform. -config SPI_STM32_QUADSPI - tristate "STM32 Quad SPI controller" - depends on ARCH_STM32 || COMPILE_TEST - help - This enables support for the STM32 Quad SPI controller. - We only connect the NOR to this controller. 
- endif # MTD_SPI_NOR diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile index 189a15cca3ec..9c5ed03cdc19 100644 --- a/drivers/mtd/spi-nor/Makefile +++ b/drivers/mtd/spi-nor/Makefile @@ -8,4 +8,3 @@ obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o -obj-$(CONFIG_SPI_STM32_QUADSPI) += stm32-quadspi.o diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c b/drivers/mtd/spi-nor/stm32-quadspi.c deleted file mode 100644 index 13e9fc961d3b..000000000000 --- a/drivers/mtd/spi-nor/stm32-quadspi.c +++ /dev/null @@ -1,720 +0,0 @@ -/* - * Driver for stm32 quadspi controller - * - * Copyright (C) 2017, STMicroelectronics - All Rights Reserved - * Author(s): Ludovic Barre author . - * - * License terms: GPL V2.0. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along with - * This program. If not, see . - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define QUADSPI_CR 0x00 -#define CR_EN BIT(0) -#define CR_ABORT BIT(1) -#define CR_DMAEN BIT(2) -#define CR_TCEN BIT(3) -#define CR_SSHIFT BIT(4) -#define CR_DFM BIT(6) -#define CR_FSEL BIT(7) -#define CR_FTHRES_SHIFT 8 -#define CR_FTHRES_MASK GENMASK(12, 8) -#define CR_FTHRES(n) (((n) << CR_FTHRES_SHIFT) & CR_FTHRES_MASK) -#define CR_TEIE BIT(16) -#define CR_TCIE BIT(17) -#define CR_FTIE BIT(18) -#define CR_SMIE BIT(19) -#define CR_TOIE BIT(20) -#define CR_PRESC_SHIFT 24 -#define CR_PRESC_MASK GENMASK(31, 24) -#define CR_PRESC(n) (((n) << CR_PRESC_SHIFT) & CR_PRESC_MASK) - -#define QUADSPI_DCR 0x04 -#define DCR_CSHT_SHIFT 8 -#define DCR_CSHT_MASK GENMASK(10, 8) -#define DCR_CSHT(n) (((n) << DCR_CSHT_SHIFT) & DCR_CSHT_MASK) -#define DCR_FSIZE_SHIFT 16 -#define DCR_FSIZE_MASK GENMASK(20, 16) -#define DCR_FSIZE(n) (((n) << DCR_FSIZE_SHIFT) & DCR_FSIZE_MASK) - -#define QUADSPI_SR 0x08 -#define SR_TEF BIT(0) -#define SR_TCF BIT(1) -#define SR_FTF BIT(2) -#define SR_SMF BIT(3) -#define SR_TOF BIT(4) -#define SR_BUSY BIT(5) -#define SR_FLEVEL_SHIFT 8 -#define SR_FLEVEL_MASK GENMASK(13, 8) - -#define QUADSPI_FCR 0x0c -#define FCR_CTCF BIT(1) - -#define QUADSPI_DLR 0x10 - -#define QUADSPI_CCR 0x14 -#define CCR_INST_SHIFT 0 -#define CCR_INST_MASK GENMASK(7, 0) -#define CCR_INST(n) (((n) << CCR_INST_SHIFT) & CCR_INST_MASK) -#define CCR_IMODE_NONE (0U << 8) -#define CCR_IMODE_1 (1U << 8) -#define CCR_IMODE_2 (2U << 8) -#define CCR_IMODE_4 (3U << 8) -#define CCR_ADMODE_NONE (0U << 10) -#define CCR_ADMODE_1 (1U << 10) -#define CCR_ADMODE_2 (2U << 10) -#define CCR_ADMODE_4 (3U << 10) -#define CCR_ADSIZE_SHIFT 12 -#define CCR_ADSIZE_MASK GENMASK(13, 12) -#define CCR_ADSIZE(n) (((n) << CCR_ADSIZE_SHIFT) & CCR_ADSIZE_MASK) -#define CCR_ABMODE_NONE (0U << 14) -#define CCR_ABMODE_1 (1U << 14) -#define CCR_ABMODE_2 (2U << 14) -#define CCR_ABMODE_4 (3U << 14) -#define CCR_ABSIZE_8 (0U << 16) -#define CCR_ABSIZE_16 (1U << 16) -#define 
CCR_ABSIZE_24 (2U << 16) -#define CCR_ABSIZE_32 (3U << 16) -#define CCR_DCYC_SHIFT 18 -#define CCR_DCYC_MASK GENMASK(22, 18) -#define CCR_DCYC(n) (((n) << CCR_DCYC_SHIFT) & CCR_DCYC_MASK) -#define CCR_DMODE_NONE (0U << 24) -#define CCR_DMODE_1 (1U << 24) -#define CCR_DMODE_2 (2U << 24) -#define CCR_DMODE_4 (3U << 24) -#define CCR_FMODE_INDW (0U << 26) -#define CCR_FMODE_INDR (1U << 26) -#define CCR_FMODE_APM (2U << 26) -#define CCR_FMODE_MM (3U << 26) - -#define QUADSPI_AR 0x18 -#define QUADSPI_ABR 0x1c -#define QUADSPI_DR 0x20 -#define QUADSPI_PSMKR 0x24 -#define QUADSPI_PSMAR 0x28 -#define QUADSPI_PIR 0x2c -#define QUADSPI_LPTR 0x30 -#define LPTR_DFT_TIMEOUT 0x10 - -#define FSIZE_VAL(size) (__fls(size) - 1) - -#define STM32_MAX_MMAP_SZ SZ_256M -#define STM32_MAX_NORCHIP 2 - -#define STM32_QSPI_FIFO_SZ 32 -#define STM32_QSPI_FIFO_TIMEOUT_US 30000 -#define STM32_QSPI_BUSY_TIMEOUT_US 100000 - -struct stm32_qspi_flash { - struct spi_nor nor; - struct stm32_qspi *qspi; - u32 cs; - u32 fsize; - u32 presc; - u32 read_mode; - bool registered; - u32 prefetch_limit; -}; - -struct stm32_qspi { - struct device *dev; - void __iomem *io_base; - void __iomem *mm_base; - resource_size_t mm_size; - u32 nor_num; - struct clk *clk; - u32 clk_rate; - struct stm32_qspi_flash flash[STM32_MAX_NORCHIP]; - struct completion cmd_completion; - - /* - * to protect device configuration, could be different between - * 2 flash access (bk1, bk2) - */ - struct mutex lock; -}; - -struct stm32_qspi_cmd { - u8 addr_width; - u8 dummy; - bool tx_data; - u8 opcode; - u32 framemode; - u32 qspimode; - u32 addr; - size_t len; - void *buf; -}; - -static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi) -{ - u32 cr; - int err = 0; - - if (readl_relaxed(qspi->io_base + QUADSPI_SR) & SR_TCF) - return 0; - - reinit_completion(&qspi->cmd_completion); - cr = readl_relaxed(qspi->io_base + QUADSPI_CR); - writel_relaxed(cr | CR_TCIE, qspi->io_base + QUADSPI_CR); - - if (!wait_for_completion_interruptible_timeout(&qspi->cmd_completion, - msecs_to_jiffies(1000))) - err = -ETIMEDOUT; - - writel_relaxed(cr, qspi->io_base + QUADSPI_CR); - return err; -} - -static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi) -{ - u32 sr; - - return readl_relaxed_poll_timeout(qspi->io_base + QUADSPI_SR, sr, - !(sr & SR_BUSY), 10, - STM32_QSPI_BUSY_TIMEOUT_US); -} - -static void stm32_qspi_set_framemode(struct spi_nor *nor, - struct stm32_qspi_cmd *cmd, bool read) -{ - u32 dmode = CCR_DMODE_1; - - cmd->framemode = CCR_IMODE_1; - - if (read) { - switch (nor->read_proto) { - default: - case SNOR_PROTO_1_1_1: - dmode = CCR_DMODE_1; - break; - case SNOR_PROTO_1_1_2: - dmode = CCR_DMODE_2; - break; - case SNOR_PROTO_1_1_4: - dmode = CCR_DMODE_4; - break; - } - } - - cmd->framemode |= cmd->tx_data ? dmode : 0; - cmd->framemode |= cmd->addr_width ? 
CCR_ADMODE_1 : 0; -} - -static void stm32_qspi_read_fifo(u8 *val, void __iomem *addr) -{ - *val = readb_relaxed(addr); -} - -static void stm32_qspi_write_fifo(u8 *val, void __iomem *addr) -{ - writeb_relaxed(*val, addr); -} - -static int stm32_qspi_tx_poll(struct stm32_qspi *qspi, - const struct stm32_qspi_cmd *cmd) -{ - void (*tx_fifo)(u8 *, void __iomem *); - u32 len = cmd->len, sr; - u8 *buf = cmd->buf; - int ret; - - if (cmd->qspimode == CCR_FMODE_INDW) - tx_fifo = stm32_qspi_write_fifo; - else - tx_fifo = stm32_qspi_read_fifo; - - while (len--) { - ret = readl_relaxed_poll_timeout(qspi->io_base + QUADSPI_SR, - sr, (sr & SR_FTF), 10, - STM32_QSPI_FIFO_TIMEOUT_US); - if (ret) { - dev_err(qspi->dev, "fifo timeout (stat:%#x)\n", sr); - return ret; - } - tx_fifo(buf++, qspi->io_base + QUADSPI_DR); - } - - return 0; -} - -static int stm32_qspi_tx_mm(struct stm32_qspi *qspi, - const struct stm32_qspi_cmd *cmd) -{ - memcpy_fromio(cmd->buf, qspi->mm_base + cmd->addr, cmd->len); - return 0; -} - -static int stm32_qspi_tx(struct stm32_qspi *qspi, - const struct stm32_qspi_cmd *cmd) -{ - if (!cmd->tx_data) - return 0; - - if (cmd->qspimode == CCR_FMODE_MM) - return stm32_qspi_tx_mm(qspi, cmd); - - return stm32_qspi_tx_poll(qspi, cmd); -} - -static int stm32_qspi_send(struct stm32_qspi_flash *flash, - const struct stm32_qspi_cmd *cmd) -{ - struct stm32_qspi *qspi = flash->qspi; - u32 ccr, dcr, cr; - u32 last_byte; - int err; - - err = stm32_qspi_wait_nobusy(qspi); - if (err) - goto abort; - - dcr = readl_relaxed(qspi->io_base + QUADSPI_DCR) & ~DCR_FSIZE_MASK; - dcr |= DCR_FSIZE(flash->fsize); - writel_relaxed(dcr, qspi->io_base + QUADSPI_DCR); - - cr = readl_relaxed(qspi->io_base + QUADSPI_CR); - cr &= ~CR_PRESC_MASK & ~CR_FSEL; - cr |= CR_PRESC(flash->presc); - cr |= flash->cs ? 
CR_FSEL : 0; - writel_relaxed(cr, qspi->io_base + QUADSPI_CR); - - if (cmd->tx_data) - writel_relaxed(cmd->len - 1, qspi->io_base + QUADSPI_DLR); - - ccr = cmd->framemode | cmd->qspimode; - - if (cmd->dummy) - ccr |= CCR_DCYC(cmd->dummy); - - if (cmd->addr_width) - ccr |= CCR_ADSIZE(cmd->addr_width - 1); - - ccr |= CCR_INST(cmd->opcode); - writel_relaxed(ccr, qspi->io_base + QUADSPI_CCR); - - if (cmd->addr_width && cmd->qspimode != CCR_FMODE_MM) - writel_relaxed(cmd->addr, qspi->io_base + QUADSPI_AR); - - err = stm32_qspi_tx(qspi, cmd); - if (err) - goto abort; - - if (cmd->qspimode != CCR_FMODE_MM) { - err = stm32_qspi_wait_cmd(qspi); - if (err) - goto abort; - writel_relaxed(FCR_CTCF, qspi->io_base + QUADSPI_FCR); - } else { - last_byte = cmd->addr + cmd->len; - if (last_byte > flash->prefetch_limit) - goto abort; - } - - return err; - -abort: - cr = readl_relaxed(qspi->io_base + QUADSPI_CR) | CR_ABORT; - writel_relaxed(cr, qspi->io_base + QUADSPI_CR); - - if (err) - dev_err(qspi->dev, "%s abort err:%d\n", __func__, err); - - return err; -} - -static int stm32_qspi_read_reg(struct spi_nor *nor, - u8 opcode, u8 *buf, int len) -{ - struct stm32_qspi_flash *flash = nor->priv; - struct device *dev = flash->qspi->dev; - struct stm32_qspi_cmd cmd; - - dev_dbg(dev, "read_reg: cmd:%#.2x buf:%pK len:%#x\n", opcode, buf, len); - - memset(&cmd, 0, sizeof(cmd)); - cmd.opcode = opcode; - cmd.tx_data = true; - cmd.len = len; - cmd.buf = buf; - cmd.qspimode = CCR_FMODE_INDR; - - stm32_qspi_set_framemode(nor, &cmd, false); - - return stm32_qspi_send(flash, &cmd); -} - -static int stm32_qspi_write_reg(struct spi_nor *nor, u8 opcode, - u8 *buf, int len) -{ - struct stm32_qspi_flash *flash = nor->priv; - struct device *dev = flash->qspi->dev; - struct stm32_qspi_cmd cmd; - - dev_dbg(dev, "write_reg: cmd:%#.2x buf:%pK len:%#x\n", opcode, buf, len); - - memset(&cmd, 0, sizeof(cmd)); - cmd.opcode = opcode; - cmd.tx_data = !!(buf && len > 0); - cmd.len = len; - cmd.buf = buf; - cmd.qspimode = CCR_FMODE_INDW; - - stm32_qspi_set_framemode(nor, &cmd, false); - - return stm32_qspi_send(flash, &cmd); -} - -static ssize_t stm32_qspi_read(struct spi_nor *nor, loff_t from, size_t len, - u_char *buf) -{ - struct stm32_qspi_flash *flash = nor->priv; - struct stm32_qspi *qspi = flash->qspi; - struct stm32_qspi_cmd cmd; - int err; - - dev_dbg(qspi->dev, "read(%#.2x): buf:%pK from:%#.8x len:%#zx\n", - nor->read_opcode, buf, (u32)from, len); - - memset(&cmd, 0, sizeof(cmd)); - cmd.opcode = nor->read_opcode; - cmd.addr_width = nor->addr_width; - cmd.addr = (u32)from; - cmd.tx_data = true; - cmd.dummy = nor->read_dummy; - cmd.len = len; - cmd.buf = buf; - cmd.qspimode = flash->read_mode; - - stm32_qspi_set_framemode(nor, &cmd, true); - err = stm32_qspi_send(flash, &cmd); - - return err ? err : len; -} - -static ssize_t stm32_qspi_write(struct spi_nor *nor, loff_t to, size_t len, - const u_char *buf) -{ - struct stm32_qspi_flash *flash = nor->priv; - struct device *dev = flash->qspi->dev; - struct stm32_qspi_cmd cmd; - int err; - - dev_dbg(dev, "write(%#.2x): buf:%p to:%#.8x len:%#zx\n", - nor->program_opcode, buf, (u32)to, len); - - memset(&cmd, 0, sizeof(cmd)); - cmd.opcode = nor->program_opcode; - cmd.addr_width = nor->addr_width; - cmd.addr = (u32)to; - cmd.tx_data = true; - cmd.len = len; - cmd.buf = (void *)buf; - cmd.qspimode = CCR_FMODE_INDW; - - stm32_qspi_set_framemode(nor, &cmd, false); - err = stm32_qspi_send(flash, &cmd); - - return err ? 
err : len; -} - -static int stm32_qspi_erase(struct spi_nor *nor, loff_t offs) -{ - struct stm32_qspi_flash *flash = nor->priv; - struct device *dev = flash->qspi->dev; - struct stm32_qspi_cmd cmd; - - dev_dbg(dev, "erase(%#.2x):offs:%#x\n", nor->erase_opcode, (u32)offs); - - memset(&cmd, 0, sizeof(cmd)); - cmd.opcode = nor->erase_opcode; - cmd.addr_width = nor->addr_width; - cmd.addr = (u32)offs; - cmd.qspimode = CCR_FMODE_INDW; - - stm32_qspi_set_framemode(nor, &cmd, false); - - return stm32_qspi_send(flash, &cmd); -} - -static irqreturn_t stm32_qspi_irq(int irq, void *dev_id) -{ - struct stm32_qspi *qspi = (struct stm32_qspi *)dev_id; - u32 cr, sr, fcr = 0; - - cr = readl_relaxed(qspi->io_base + QUADSPI_CR); - sr = readl_relaxed(qspi->io_base + QUADSPI_SR); - - if ((cr & CR_TCIE) && (sr & SR_TCF)) { - /* tx complete */ - fcr |= FCR_CTCF; - complete(&qspi->cmd_completion); - } else { - dev_info_ratelimited(qspi->dev, "spurious interrupt\n"); - } - - writel_relaxed(fcr, qspi->io_base + QUADSPI_FCR); - - return IRQ_HANDLED; -} - -static int stm32_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops) -{ - struct stm32_qspi_flash *flash = nor->priv; - struct stm32_qspi *qspi = flash->qspi; - - mutex_lock(&qspi->lock); - return 0; -} - -static void stm32_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops) -{ - struct stm32_qspi_flash *flash = nor->priv; - struct stm32_qspi *qspi = flash->qspi; - - mutex_unlock(&qspi->lock); -} - -static int stm32_qspi_flash_setup(struct stm32_qspi *qspi, - struct device_node *np) -{ - struct spi_nor_hwcaps hwcaps = { - .mask = SNOR_HWCAPS_READ | - SNOR_HWCAPS_READ_FAST | - SNOR_HWCAPS_PP, - }; - u32 width, presc, cs_num, max_rate = 0; - struct stm32_qspi_flash *flash; - struct mtd_info *mtd; - int ret; - - of_property_read_u32(np, "reg", &cs_num); - if (cs_num >= STM32_MAX_NORCHIP) - return -EINVAL; - - of_property_read_u32(np, "spi-max-frequency", &max_rate); - if (!max_rate) - return -EINVAL; - - presc = DIV_ROUND_UP(qspi->clk_rate, max_rate) - 1; - - if (of_property_read_u32(np, "spi-rx-bus-width", &width)) - width = 1; - - if (width == 4) - hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4; - else if (width == 2) - hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2; - else if (width != 1) - return -EINVAL; - - flash = &qspi->flash[cs_num]; - flash->qspi = qspi; - flash->cs = cs_num; - flash->presc = presc; - - flash->nor.dev = qspi->dev; - spi_nor_set_flash_node(&flash->nor, np); - flash->nor.priv = flash; - mtd = &flash->nor.mtd; - - flash->nor.read = stm32_qspi_read; - flash->nor.write = stm32_qspi_write; - flash->nor.erase = stm32_qspi_erase; - flash->nor.read_reg = stm32_qspi_read_reg; - flash->nor.write_reg = stm32_qspi_write_reg; - flash->nor.prepare = stm32_qspi_prep; - flash->nor.unprepare = stm32_qspi_unprep; - - writel_relaxed(LPTR_DFT_TIMEOUT, qspi->io_base + QUADSPI_LPTR); - - writel_relaxed(CR_PRESC(presc) | CR_FTHRES(3) | CR_TCEN | CR_SSHIFT - | CR_EN, qspi->io_base + QUADSPI_CR); - - /* - * in stm32 qspi controller, QUADSPI_DCR register has a fsize field - * which define the size of nor flash. - * if fsize is NULL, the controller can't sent spi-nor command. - * set a temporary value just to discover the nor flash with - * "spi_nor_scan". After, the right value (mtd->size) can be set. 
- */ - flash->fsize = FSIZE_VAL(SZ_1K); - - ret = spi_nor_scan(&flash->nor, NULL, &hwcaps); - if (ret) { - dev_err(qspi->dev, "device scan failed\n"); - return ret; - } - - flash->fsize = FSIZE_VAL(mtd->size); - flash->prefetch_limit = mtd->size - STM32_QSPI_FIFO_SZ; - - flash->read_mode = CCR_FMODE_MM; - if (mtd->size > qspi->mm_size) - flash->read_mode = CCR_FMODE_INDR; - - writel_relaxed(DCR_CSHT(1), qspi->io_base + QUADSPI_DCR); - - ret = mtd_device_register(mtd, NULL, 0); - if (ret) { - dev_err(qspi->dev, "mtd device parse failed\n"); - return ret; - } - - flash->registered = true; - - dev_dbg(qspi->dev, "read mm:%s cs:%d bus:%d\n", - flash->read_mode == CCR_FMODE_MM ? "yes" : "no", cs_num, width); - - return 0; -} - -static void stm32_qspi_mtd_free(struct stm32_qspi *qspi) -{ - int i; - - for (i = 0; i < STM32_MAX_NORCHIP; i++) - if (qspi->flash[i].registered) - mtd_device_unregister(&qspi->flash[i].nor.mtd); -} - -static int stm32_qspi_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct device_node *flash_np; - struct reset_control *rstc; - struct stm32_qspi *qspi; - struct resource *res; - int ret, irq; - - qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL); - if (!qspi) - return -ENOMEM; - - qspi->nor_num = of_get_child_count(dev->of_node); - if (!qspi->nor_num || qspi->nor_num > STM32_MAX_NORCHIP) - return -ENODEV; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi"); - qspi->io_base = devm_ioremap_resource(dev, res); - if (IS_ERR(qspi->io_base)) - return PTR_ERR(qspi->io_base); - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm"); - qspi->mm_base = devm_ioremap_resource(dev, res); - if (IS_ERR(qspi->mm_base)) - return PTR_ERR(qspi->mm_base); - - qspi->mm_size = resource_size(res); - - irq = platform_get_irq(pdev, 0); - ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0, - dev_name(dev), qspi); - if (ret) { - dev_err(dev, "failed to request irq\n"); - return ret; - } - - init_completion(&qspi->cmd_completion); - - qspi->clk = devm_clk_get(dev, NULL); - if (IS_ERR(qspi->clk)) - return PTR_ERR(qspi->clk); - - qspi->clk_rate = clk_get_rate(qspi->clk); - if (!qspi->clk_rate) - return -EINVAL; - - ret = clk_prepare_enable(qspi->clk); - if (ret) { - dev_err(dev, "can not enable the clock\n"); - return ret; - } - - rstc = devm_reset_control_get_exclusive(dev, NULL); - if (!IS_ERR(rstc)) { - reset_control_assert(rstc); - udelay(2); - reset_control_deassert(rstc); - } - - qspi->dev = dev; - platform_set_drvdata(pdev, qspi); - mutex_init(&qspi->lock); - - for_each_available_child_of_node(dev->of_node, flash_np) { - ret = stm32_qspi_flash_setup(qspi, flash_np); - if (ret) { - dev_err(dev, "unable to setup flash chip\n"); - goto err_flash; - } - } - - return 0; - -err_flash: - mutex_destroy(&qspi->lock); - stm32_qspi_mtd_free(qspi); - - clk_disable_unprepare(qspi->clk); - return ret; -} - -static int stm32_qspi_remove(struct platform_device *pdev) -{ - struct stm32_qspi *qspi = platform_get_drvdata(pdev); - - /* disable qspi */ - writel_relaxed(0, qspi->io_base + QUADSPI_CR); - - stm32_qspi_mtd_free(qspi); - mutex_destroy(&qspi->lock); - - clk_disable_unprepare(qspi->clk); - return 0; -} - -static const struct of_device_id stm32_qspi_match[] = { - {.compatible = "st,stm32f469-qspi"}, - {} -}; -MODULE_DEVICE_TABLE(of, stm32_qspi_match); - -static struct platform_driver stm32_qspi_driver = { - .probe = stm32_qspi_probe, - .remove = stm32_qspi_remove, - .driver = { - .name = "stm32-quadspi", - .of_match_table = stm32_qspi_match, 
- }, -}; -module_platform_driver(stm32_qspi_driver); - -MODULE_AUTHOR("Ludovic Barre "); -MODULE_DESCRIPTION("STMicroelectronics STM32 quad spi driver"); -MODULE_LICENSE("GPL v2"); From 21ed90acd1786880129eeae9d6718b5fc8663c36 Mon Sep 17 00:00:00 2001 From: Andrey Smirnov Date: Sun, 12 May 2019 20:33:26 -0700 Subject: [PATCH 02/60] mtd: spi-nor: Add Micron MT25QL02 support Add an entry for Micron MT25QL02 which is a 3V variant of already supported MT25QU02. Testing was done on a ZII VF610 Dev Board (rev. B). Signed-off-by: Cory Tusar Signed-off-by: Andrey Smirnov Cc: Chris Healy Cc: Marek Vasut Cc: Tudor Ambarus Cc: linux-mtd@lists.infradead.org Cc: linux-kernel@vger.kernel.org [tudor.ambarus@microchip.com: order entry alphabetically, wrap to 80 chars limit] Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/spi-nor.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 73172d7f512b..c8a965859d7b 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -1880,6 +1880,9 @@ static const struct flash_info spi_nor_ids[] = { { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, + { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096, + SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | + NO_CHIP_ERASE) }, { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, /* Micron */ From 92aae4ce84275036977468379484012545afb31a Mon Sep 17 00:00:00 2001 From: Flavio Suligoi Date: Fri, 24 May 2019 16:45:45 +0200 Subject: [PATCH 03/60] mtd: spi-nor: change "error reading JEDEC id" from dbg to err In case of SPI error during the reading of the nor Id, the probe fails without any error message related to the JEDEC Id reading procedure. Signed-off-by: Flavio Suligoi Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/spi-nor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index c8a965859d7b..c0a8837c0575 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -2065,7 +2065,7 @@ static const struct flash_info *spi_nor_read_id(struct spi_nor *nor) tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN); if (tmp < 0) { - dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp); + dev_err(nor->dev, "error %d reading JEDEC ID\n", tmp); return ERR_PTR(tmp); } From 88489c29de60783a417c79ef418bedfec5d61fa1 Mon Sep 17 00:00:00 2001 From: Robert Marko Date: Sat, 18 May 2019 21:29:35 +0200 Subject: [PATCH 04/60] mtd: spi-nor: Add Winbond w25q16jv support Testing done on Mikrotik Routerboard RB450Gx4 board under 4.14.119 and 4.19.43 kernels. The test board does not support Dual or Quad modes. Datasheet at: https://www.winbond.com/resource-files/w25q16jv%20spi%20revg%2003222018%20plus.pdf Signed-off-by: Robert Marko [tudor.ambarus@microchip.com: w25q16jv-im/jm and w25q16jv-iq/jq have different jedec ids, fix flash name.] 
Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/spi-nor.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index c0a8837c0575..f87e18e125fd 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -1999,6 +1999,11 @@ static const struct flash_info spi_nor_ids[] = { SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, + { + "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) }, { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) }, { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) }, From 2d60d1f64b57b5a5cb259caa6fbe4aaa3f9def3a Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 7 Jun 2019 13:28:48 +0200 Subject: [PATCH 05/60] mtd: spi-nor: Spelling s/Writ/Write/ Signed-off-by: Geert Uytterhoeven Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/spi-nor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index f87e18e125fd..9d03fe50533e 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -200,7 +200,7 @@ struct sfdp_header { * register does not modify status register 2. * - 101b: QE is bit 1 of status register 2. Status register 1 is read using * Read Status instruction 05h. Status register2 is read using - * instruction 35h. QE is set via Writ Status instruction 01h with + * instruction 35h. QE is set via Write Status instruction 01h with * two data bytes where bit 1 of the second byte is one. * [...] */ From ba0d4e04a5b57ef048dbf3afd9107ae6ca353258 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Thu, 20 Jun 2019 15:26:28 +0300 Subject: [PATCH 06/60] mtd: spi-nor: intel-spi: Add support for Intel Elkhart Lake SPI serial flash Intel Elkhart Lake has the same SPI serial flash controller as Ice Lake. Add Elkhart Lake PCI ID to the driver list of supported devices. Signed-off-by: Mika Westerberg Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/intel-spi-pci.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c index bfbfc17ed6aa..578f0c74e536 100644 --- a/drivers/mtd/spi-nor/intel-spi-pci.c +++ b/drivers/mtd/spi-nor/intel-spi-pci.c @@ -67,6 +67,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info }, { }, From 62de37da9f382455b983f2f92b10012109005278 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Thu, 20 Jun 2019 15:26:29 +0300 Subject: [PATCH 07/60] mtd: spi-nor: intel-spi: Convert to use SPDX identifier This gets rid of the license boilerplate duplicated in each file. No functional changes intended. 
Signed-off-by: Mika Westerberg Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/intel-spi-pci.c | 5 +---- drivers/mtd/spi-nor/intel-spi-platform.c | 5 +---- drivers/mtd/spi-nor/intel-spi.c | 5 +---- drivers/mtd/spi-nor/intel-spi.h | 5 +---- include/linux/platform_data/intel-spi.h | 5 +---- 5 files changed, 5 insertions(+), 20 deletions(-) diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c index 578f0c74e536..1b9c2d99ba38 100644 --- a/drivers/mtd/spi-nor/intel-spi-pci.c +++ b/drivers/mtd/spi-nor/intel-spi-pci.c @@ -1,12 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Intel PCH/PCU SPI flash PCI driver. * * Copyright (C) 2016, Intel Corporation * Author: Mika Westerberg - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include diff --git a/drivers/mtd/spi-nor/intel-spi-platform.c b/drivers/mtd/spi-nor/intel-spi-platform.c index 5c943df9398f..25b18804e9bb 100644 --- a/drivers/mtd/spi-nor/intel-spi-platform.c +++ b/drivers/mtd/spi-nor/intel-spi-platform.c @@ -1,12 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Intel PCH/PCU SPI flash platform driver. * * Copyright (C) 2016, Intel Corporation * Author: Mika Westerberg - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c index d60cbf23d9aa..021cef930f9f 100644 --- a/drivers/mtd/spi-nor/intel-spi.c +++ b/drivers/mtd/spi-nor/intel-spi.c @@ -1,12 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Intel PCH/PCU SPI flash driver. * * Copyright (C) 2016, Intel Corporation * Author: Mika Westerberg - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include diff --git a/drivers/mtd/spi-nor/intel-spi.h b/drivers/mtd/spi-nor/intel-spi.h index 5ab7dc250050..b03bf296fda3 100644 --- a/drivers/mtd/spi-nor/intel-spi.h +++ b/drivers/mtd/spi-nor/intel-spi.h @@ -1,12 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Intel PCH/PCU SPI flash driver. * * Copyright (C) 2016, Intel Corporation * Author: Mika Westerberg - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef INTEL_SPI_H diff --git a/include/linux/platform_data/intel-spi.h b/include/linux/platform_data/intel-spi.h index 942b0c3f8f08..001f377fb5ef 100644 --- a/include/linux/platform_data/intel-spi.h +++ b/include/linux/platform_data/intel-spi.h @@ -1,12 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Intel PCH/PCU SPI flash driver. * * Copyright (C) 2016, Intel Corporation * Author: Mika Westerberg - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #ifndef INTEL_SPI_PDATA_H From cf580a92400584a48f59e07c372a635160340864 Mon Sep 17 00:00:00 2001 From: Liu Xiang Date: Tue, 25 Jun 2019 00:00:46 +0800 Subject: [PATCH 08/60] mtd: spi-nor: fix nor->addr_width when its value configured from SFDP does not match the actual width IS25LP256 gets BFPT_DWORD1_ADDRESS_BYTES_3_ONLY from BFPT table for address width. But in actual fact the flash can support 4-byte address. Use a post bfpt fixup hook to overwrite the address width advertised by the BFPT. Signed-off-by: Liu Xiang Reviewed-by: Vignesh Raghavendra Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/spi-nor.c | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 9d03fe50533e..e3a28c0f22cb 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -1686,6 +1686,28 @@ static int sr2_bit7_quad_enable(struct spi_nor *nor) .addr_width = 3, \ .flags = SPI_NOR_NO_FR | SPI_S3AN, +static int +is25lp256_post_bfpt_fixups(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt, + struct spi_nor_flash_parameter *params) +{ + /* + * IS25LP256 supports 4B opcodes, but the BFPT advertises a + * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width. + * Overwrite the address width advertised by the BFPT. + */ + if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) == + BFPT_DWORD1_ADDRESS_BYTES_3_ONLY) + nor->addr_width = 4; + + return 0; +} + +static struct spi_nor_fixups is25lp256_fixups = { + .post_bfpt = is25lp256_post_bfpt_fixups, +}; + static int mx25l25635_post_bfpt_fixups(struct spi_nor *nor, const struct sfdp_parameter_header *bfpt_header, @@ -1827,7 +1849,8 @@ static const struct flash_info spi_nor_ids[] = { SECT_4K | SPI_NOR_DUAL_READ) }, { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | - SPI_NOR_4B_OPCODES) }, + SPI_NOR_4B_OPCODES) + .fixups = &is25lp256_fixups }, { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128, From 63d3cd297bc045536e4c3eaddc2cf6aa4a8cf0df Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Thu, 13 Jun 2019 06:31:37 -0500 Subject: [PATCH 09/60] dt-bindings: cadence-quadspi: add options reset property The QSPI module can have an optional reset signals that will hold the module in a reset state. Signed-off-by: Dinh Nguyen Signed-off-by: Tudor Ambarus --- Documentation/devicetree/bindings/mtd/cadence-quadspi.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt index 4345c3a6f530..945be7d5b236 100644 --- a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt +++ b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt @@ -35,6 +35,9 @@ custom properties: (qspi_n_ss_out). - cdns,tslch-ns : Delay in nanoseconds between setting qspi_n_ss_out low and first bit transfer. +- resets : Must contain an entry for each entry in reset-names. + See ../reset/reset.txt for details. +- reset-names : Must include either "qspi" and/or "qspi-ocp". Example: @@ -50,6 +53,8 @@ Example: cdns,fifo-depth = <128>; cdns,fifo-width = <4>; cdns,trigger-address = <0x00000000>; + resets = <&rst QSPI_RESET>, <&rst QSPI_OCP_RESET>; + reset-names = "qspi", "qspi-ocp"; flash0: n25q00@0 { ... 
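For reference, the consumer-side pattern that the following patch applies in cqspi_probe() looks roughly like the sketch below. This is only an illustration: dev stands for the probing device, the reset names match the binding above, and error handling is trimmed.

	struct reset_control *rstc, *rstc_ocp;

	/* Optional resets: a missing DT entry yields a NULL (no-op) handle. */
	rstc = devm_reset_control_get_optional_exclusive(dev, "qspi");
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
	if (IS_ERR(rstc_ocp))
		return PTR_ERR(rstc_ocp);

	/* Pulse each line so the controller starts from a known state. */
	reset_control_assert(rstc);
	reset_control_deassert(rstc);

	reset_control_assert(rstc_ocp);
	reset_control_deassert(rstc_ocp);

Since the _optional_ variants return a NULL stub when the property is absent, boards that describe neither reset continue to probe unchanged.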
From 8d1336c241bdadf61a56e398d82d1e512dbff5f8 Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Thu, 13 Jun 2019 06:31:38 -0500 Subject: [PATCH 10/60] mtd: spi-nor: cadence-quadspi: add reset control Get the reset control properties for the QSPI controller and bring them out of reset. Most will have just one reset bit, but there is an additional OCP reset bit that is used ECC. The OCP reset bit will also need to get de-asserted as well. [1] The reason this patch is needed is in the case where a bootloader leaves the QSPI controller in a reset state, or a state where init cannot occur successfully, the patch will put the QSPI controller into a clean state. [1] https://www.intel.com/content/www/us/en/programmable/hps/arria-10/hps.html#reg_soc_top/sfo1429890575955.html Suggested-by: Tien-Fong Chee Signed-off-by: Dinh Nguyen Reviewed-by: Vignesh Raghavendra [tudor.ambarus@microchip.com: declare rstc and rstc_ocp on the same line] Signed-off-by: Tudor Ambarus --- drivers/mtd/spi-nor/cadence-quadspi.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c index 792628750eec..59b8cc3457b6 100644 --- a/drivers/mtd/spi-nor/cadence-quadspi.c +++ b/drivers/mtd/spi-nor/cadence-quadspi.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -1336,6 +1337,7 @@ static int cqspi_probe(struct platform_device *pdev) struct cqspi_st *cqspi; struct resource *res; struct resource *res_ahb; + struct reset_control *rstc, *rstc_ocp; const struct cqspi_driver_platdata *ddata; int ret; int irq; @@ -1402,6 +1404,25 @@ static int cqspi_probe(struct platform_device *pdev) goto probe_clk_failed; } + /* Obtain QSPI reset control */ + rstc = devm_reset_control_get_optional_exclusive(dev, "qspi"); + if (IS_ERR(rstc)) { + dev_err(dev, "Cannot get QSPI reset.\n"); + return PTR_ERR(rstc); + } + + rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp"); + if (IS_ERR(rstc_ocp)) { + dev_err(dev, "Cannot get QSPI OCP reset.\n"); + return PTR_ERR(rstc_ocp); + } + + reset_control_assert(rstc); + reset_control_deassert(rstc); + + reset_control_assert(rstc_ocp); + reset_control_deassert(rstc_ocp); + cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk); ddata = of_device_get_match_data(dev); if (ddata && (ddata->quirks & CQSPI_NEEDS_WR_DELAY)) From 99a125f8edec391e423962847c6fd1d6994f0ad8 Mon Sep 17 00:00:00 2001 From: Chris Packham Date: Wed, 22 May 2019 12:06:28 +1200 Subject: [PATCH 11/60] mtd: cfi_cmdset_0002: dynamically determine the max sectors Because PPB unlocking unlocks the whole chip cfi_ppb_unlock() needs to remember the locked status for each sector so it can re-lock the unaddressed sectors. Dynamically calculate the maximum number of sectors rather than using a hardcoded value that is too small for larger chips. Tested with Spansion S29GL01GS11TFI flash device. 
Signed-off-by: Chris Packham Reviewed-by: Stefan Roese Acked-by: Vignesh Raghavendra Signed-off-by: Miquel Raynal --- drivers/mtd/chips/cfi_cmdset_0002.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index c8fa5906bdf9..a1a7d334aa82 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -2533,8 +2533,6 @@ struct ppb_lock { int locked; }; -#define MAX_SECTORS 512 - #define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1) #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2) #define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3) @@ -2633,6 +2631,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, int i; int sectors; int ret; + int max_sectors; /* * PPB unlocking always unlocks all sectors of the flash chip. @@ -2640,7 +2639,11 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, * first check the locking status of all sectors and save * it for future use. */ - sect = kcalloc(MAX_SECTORS, sizeof(struct ppb_lock), GFP_KERNEL); + max_sectors = 0; + for (i = 0; i < mtd->numeraseregions; i++) + max_sectors += regions[i].numblocks; + + sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL); if (!sect) return -ENOMEM; @@ -2689,9 +2692,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, } sectors++; - if (sectors >= MAX_SECTORS) { + if (sectors >= max_sectors) { printk(KERN_ERR "Only %d sectors for PPB locking supported!\n", - MAX_SECTORS); + max_sectors); kfree(sect); return -EINVAL; } From 4844ef80305d0180051d0787cd91c63573255dc2 Mon Sep 17 00:00:00 2001 From: Vignesh Raghavendra Date: Tue, 25 Jun 2019 13:27:42 +0530 Subject: [PATCH 12/60] mtd: cfi_cmdset_0002: Add support for polling status register HyperFlash devices are compliant with CFI AMD/Fujitsu Extended Command Set (0x0002) for flash operations, therefore drivers/mtd/chips/cfi_cmdset_0002.c can be used as is. But these devices do not support DQ polling method of determining chip ready/good status. These flashes provide Status Register whose bits can be polled to know status of flash operation. Cypress HyperFlash datasheet here[1], talks about CFI Amd/Fujitsu Extended Query version 1.5. Bit 0 of "Software Features supported" field of CFI Primary Vendor-Specific Extended Query table indicates presence/absence of status register and Bit 1 indicates whether or not DQ polling is supported. Using these bits, its possible to determine whether flash supports DQ polling or need to use Status Register. Add support for polling Status Register to know device ready/status of erase/write operations when DQ polling is not supported. Print error messages on erase/program failure by looking at related Status Register bits. [1] https://www.cypress.com/file/213346/download Signed-off-by: Vignesh Raghavendra Reviewed-by: Tokunori Ikegami Signed-off-by: Miquel Raynal --- drivers/mtd/chips/cfi_cmdset_0002.c | 130 ++++++++++++++++++++++++---- include/linux/mtd/cfi.h | 7 ++ 2 files changed, 120 insertions(+), 17 deletions(-) diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index a1a7d334aa82..f4da7bd552e9 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -49,6 +49,16 @@ #define SST49LF008A 0x005a #define AT49BV6416 0x00d6 +/* + * Status Register bit description. Used by flash devices that don't + * support DQ polling (e.g. 
HyperFlash) + */ +#define CFI_SR_DRB BIT(7) +#define CFI_SR_ESB BIT(5) +#define CFI_SR_PSB BIT(4) +#define CFI_SR_WBASB BIT(3) +#define CFI_SR_SLSB BIT(1) + static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); @@ -97,6 +107,50 @@ static struct mtd_chip_driver cfi_amdstd_chipdrv = { .module = THIS_MODULE }; +/* + * Use status register to poll for Erase/write completion when DQ is not + * supported. This is indicated by Bit[1:0] of SoftwareFeatures field in + * CFI Primary Vendor-Specific Extended Query table 1.5 + */ +static int cfi_use_status_reg(struct cfi_private *cfi) +{ + struct cfi_pri_amdstd *extp = cfi->cmdset_priv; + u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ; + + return extp->MinorVersion >= '5' && + (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG; +} + +static void cfi_check_err_status(struct map_info *map, struct flchip *chip, + unsigned long adr) +{ + struct cfi_private *cfi = map->fldrv_priv; + map_word status; + + if (!cfi_use_status_reg(cfi)) + return; + + cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); + status = map_read(map, adr); + + if (map_word_bitsset(map, status, CMD(0x3a))) { + unsigned long chipstatus = MERGESTATUS(status); + + if (chipstatus & CFI_SR_ESB) + pr_err("%s erase operation failed, status %lx\n", + map->name, chipstatus); + if (chipstatus & CFI_SR_PSB) + pr_err("%s program operation failed, status %lx\n", + map->name, chipstatus); + if (chipstatus & CFI_SR_WBASB) + pr_err("%s buffer program command aborted, status %lx\n", + map->name, chipstatus); + if (chipstatus & CFI_SR_SLSB) + pr_err("%s sector write protected, status %lx\n", + map->name, chipstatus); + } +} /* #define DEBUG_CFI_FEATURES */ @@ -742,10 +796,25 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) * correctly and is therefore not done (particularly with interleaved chips * as each chip must be checked independently of the others). */ -static int __xipram chip_ready(struct map_info *map, unsigned long addr) +static int __xipram chip_ready(struct map_info *map, struct flchip *chip, + unsigned long addr) { + struct cfi_private *cfi = map->fldrv_priv; map_word d, t; + if (cfi_use_status_reg(cfi)) { + map_word ready = CMD(CFI_SR_DRB); + /* + * For chips that support status register, check device + * ready bit + */ + cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); + d = map_read(map, addr); + + return map_word_andequal(map, d, ready, ready); + } + d = map_read(map, addr); t = map_read(map, addr); @@ -767,10 +836,30 @@ static int __xipram chip_ready(struct map_info *map, unsigned long addr) * as each chip must be checked independently of the others). * */ -static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected) +static int __xipram chip_good(struct map_info *map, struct flchip *chip, + unsigned long addr, map_word expected) { + struct cfi_private *cfi = map->fldrv_priv; map_word oldd, curd; + if (cfi_use_status_reg(cfi)) { + map_word ready = CMD(CFI_SR_DRB); + map_word err = CMD(CFI_SR_PSB | CFI_SR_ESB); + /* + * For chips that support status register, check device + * ready bit and Erase/Program status bit to know if + * operation succeeded. 
+ */ + cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); + curd = map_read(map, addr); + + if (map_word_andequal(map, curd, ready, ready)) + return !map_word_bitsset(map, curd, err); + + return 0; + } + oldd = map_read(map, addr); curd = map_read(map, addr); @@ -792,7 +881,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr case FL_STATUS: for (;;) { - if (chip_ready(map, adr)) + if (chip_ready(map, chip, adr)) break; if (time_after(jiffies, timeo)) { @@ -830,7 +919,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr chip->state = FL_ERASE_SUSPENDING; chip->erase_suspended = 1; for (;;) { - if (chip_ready(map, adr)) + if (chip_ready(map, chip, adr)) break; if (time_after(jiffies, timeo)) { @@ -1362,7 +1451,7 @@ static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr, /* wait for chip to become ready */ timeo = jiffies + msecs_to_jiffies(2); for (;;) { - if (chip_ready(map, adr)) + if (chip_ready(map, chip, adr)) break; if (time_after(jiffies, timeo)) { @@ -1628,22 +1717,24 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, continue; } - if (time_after(jiffies, timeo) && !chip_ready(map, adr)){ + if (time_after(jiffies, timeo) && + !chip_ready(map, chip, adr)) { xip_enable(map, chip, adr); printk(KERN_WARNING "MTD %s(): software timeout\n", __func__); xip_disable(map, chip, adr); break; } - if (chip_ready(map, adr)) + if (chip_ready(map, chip, adr)) break; /* Latency issues. Drop the lock, wait a while and retry */ UDELAY(map, chip, adr, 1); } /* Did we succeed? */ - if (!chip_good(map, adr, datum)) { + if (!chip_good(map, chip, adr, datum)) { /* reset on all failures. */ + cfi_check_err_status(map, chip, adr); map_write(map, CMD(0xF0), chip->start); /* FIXME - should have reset delay before continuing */ @@ -1881,10 +1972,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, * We check "time_after" and "!chip_good" before checking "chip_good" to avoid * the failure due to scheduling. */ - if (time_after(jiffies, timeo) && !chip_good(map, adr, datum)) + if (time_after(jiffies, timeo) && + !chip_good(map, chip, adr, datum)) break; - if (chip_good(map, adr, datum)) { + if (chip_good(map, chip, adr, datum)) { xip_enable(map, chip, adr); goto op_done; } @@ -1901,6 +1993,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, * See e.g. * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf */ + cfi_check_err_status(map, chip, adr); cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, @@ -2018,7 +2111,7 @@ static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip, * If the driver thinks the chip is idle, and no toggle bits * are changing, then the chip is actually idle for sure. 
*/ - if (chip->state == FL_READY && chip_ready(map, adr)) + if (chip->state == FL_READY && chip_ready(map, chip, adr)) return 0; /* @@ -2035,7 +2128,7 @@ static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip, /* wait for the chip to become ready */ for (i = 0; i < jiffies_to_usecs(timeo); i++) { - if (chip_ready(map, adr)) + if (chip_ready(map, chip, adr)) return 0; udelay(1); @@ -2099,14 +2192,15 @@ retry: map_write(map, datum, adr); for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) { - if (chip_ready(map, adr)) + if (chip_ready(map, chip, adr)) break; udelay(1); } - if (!chip_good(map, adr, datum)) { + if (!chip_good(map, chip, adr, datum)) { /* reset on all failures. */ + cfi_check_err_status(map, chip, adr); map_write(map, CMD(0xF0), chip->start); /* FIXME - should have reset delay before continuing */ @@ -2300,7 +2394,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) chip->erase_suspended = 0; } - if (chip_good(map, adr, map_word_ff(map))) + if (chip_good(map, chip, adr, map_word_ff(map))) break; if (time_after(jiffies, timeo)) { @@ -2316,6 +2410,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) /* Did we succeed? */ if (ret) { /* reset on all failures. */ + cfi_check_err_status(map, chip, adr); map_write(map, CMD(0xF0), chip->start); /* FIXME - should have reset delay before continuing */ @@ -2396,7 +2491,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, chip->erase_suspended = 0; } - if (chip_good(map, adr, map_word_ff(map))) + if (chip_good(map, chip, adr, map_word_ff(map))) break; if (time_after(jiffies, timeo)) { @@ -2412,6 +2507,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, /* Did we succeed? */ if (ret) { /* reset on all failures. */ + cfi_check_err_status(map, chip, adr); map_write(map, CMD(0xF0), chip->start); /* FIXME - should have reset delay before continuing */ @@ -2587,7 +2683,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map, */ timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */ for (;;) { - if (chip_ready(map, adr)) + if (chip_ready(map, chip, adr)) break; if (time_after(jiffies, timeo)) { diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index cbf77168658c..7fdbc1ff6527 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -233,6 +233,13 @@ struct cfi_pri_amdstd { uint8_t VppMin; uint8_t VppMax; uint8_t TopBottom; + /* Below field are added from version 1.5 */ + uint8_t ProgramSuspend; + uint8_t UnlockBypass; + uint8_t SecureSiliconSector; + uint8_t SoftwareFeatures; +#define CFI_POLL_STATUS_REG BIT(0) +#define CFI_POLL_DQ BIT(1) } __packed; /* Vendor-Specific PRI for Atmel chips (command set 0x0002) */ From 89ebf2b8501c41b16b093fbf8b617fb5630445d9 Mon Sep 17 00:00:00 2001 From: Vignesh Raghavendra Date: Tue, 25 Jun 2019 13:27:43 +0530 Subject: [PATCH 13/60] dt-bindings: mtd: Add binding documentation for HyperFlash Add DT binding documentation for HyperFlash devices. 
Signed-off-by: Vignesh Raghavendra Reviewed-by: Rob Herring Signed-off-by: Miquel Raynal --- .../devicetree/bindings/mtd/cypress,hyperflash.txt | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 Documentation/devicetree/bindings/mtd/cypress,hyperflash.txt diff --git a/Documentation/devicetree/bindings/mtd/cypress,hyperflash.txt b/Documentation/devicetree/bindings/mtd/cypress,hyperflash.txt new file mode 100644 index 000000000000..ad42f4db32f1 --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/cypress,hyperflash.txt @@ -0,0 +1,13 @@ +Bindings for HyperFlash NOR flash chips compliant with Cypress HyperBus +specification and supports Cypress CFI specification 1.5 command set. + +Required properties: +- compatible : "cypress,hyperflash", "cfi-flash" for HyperFlash NOR chips +- reg : Address of flash's memory map + +Example: + + flash@0 { + compatible = "cypress,hyperflash", "cfi-flash"; + reg = <0x0 0x4000000>; + }; From dcc7d3446a0fa19bd7e8074920b8f9ef3b7ec00c Mon Sep 17 00:00:00 2001 From: Vignesh Raghavendra Date: Tue, 25 Jun 2019 13:27:44 +0530 Subject: [PATCH 14/60] mtd: Add support for HyperBus memory devices Cypress' HyperBus is Low Signal Count, High Performance Double Data Rate Bus interface between a host system master and one or more slave interfaces. HyperBus is used to connect microprocessor, microcontroller, or ASIC devices with random access NOR flash memory (called HyperFlash) or self refresh DRAM (called HyperRAM). Its a 8-bit data bus (DQ[7:0]) with Read-Write Data Strobe (RWDS) signal and either Single-ended clock(3.0V parts) or Differential clock (1.8V parts). It uses ChipSelect lines to select b/w multiple slaves. At bus level, it follows a separate protocol described in HyperBus specification[1]. HyperFlash follows CFI AMD/Fujitsu Extended Command Set (0x0002) similar to that of existing parallel NORs. Since HyperBus is x8 DDR bus, its equivalent to x16 parallel NOR flash with respect to bits per clock cycle. But HyperBus operates at >166MHz frequencies. HyperRAM provides direct random read/write access to flash memory array. But, HyperBus memory controllers seem to abstract implementation details and expose a simple MMIO interface to access connected flash. Add support for registering HyperFlash devices with MTD framework. MTD maps framework along with CFI chip support framework are used to support communicating with flash. Framework is modelled along the lines of spi-nor framework. HyperBus memory controller (HBMC) drivers calls hyperbus_register_device() to register a single HyperFlash device. HyperFlash core parses MMIO access information from DT, sets up the map_info struct, probes CFI flash and registers it with MTD framework. Some HBMC masters need calibration/training sequence[3] to be carried out, in order for DLL inside the controller to lock, by reading a known string/pattern. This is done by repeatedly reading CFI Query Identification String. Calibration needs to be done before trying to detect flash as part of CFI flash probe. HyperRAM is not supported at the moment. HyperBus specification can be found at[1] HyperFlash datasheet can be found at[2] [1] https://www.cypress.com/file/213356/download [2] https://www.cypress.com/file/213346/download [3] http://www.ti.com/lit/ug/spruid7b/spruid7b.pdf Table 12-5741. 
HyperFlash Access Sequence Signed-off-by: Vignesh Raghavendra Signed-off-by: Miquel Raynal --- MAINTAINERS | 7 ++ drivers/mtd/Kconfig | 2 + drivers/mtd/Makefile | 1 + drivers/mtd/hyperbus/Kconfig | 11 ++ drivers/mtd/hyperbus/Makefile | 3 + drivers/mtd/hyperbus/hyperbus-core.c | 153 +++++++++++++++++++++++++++ include/linux/mtd/hyperbus.h | 84 +++++++++++++++ 7 files changed, 261 insertions(+) create mode 100644 drivers/mtd/hyperbus/Kconfig create mode 100644 drivers/mtd/hyperbus/Makefile create mode 100644 drivers/mtd/hyperbus/hyperbus-core.c create mode 100644 include/linux/mtd/hyperbus.h diff --git a/MAINTAINERS b/MAINTAINERS index 5cfbea4ce575..f1253adb8cf6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7305,6 +7305,13 @@ F: include/uapi/linux/hyperv.h F: tools/hv/ F: Documentation/ABI/stable/sysfs-bus-vmbus +HYPERBUS SUPPORT +M: Vignesh Raghavendra +S: Supported +F: drivers/mtd/hyperbus/ +F: include/linux/mtd/hyperbus.h +F: Documentation/devicetree/bindings/mtd/cypress,hyperflash.txt + HYPERVISOR VIRTUAL CONSOLE DRIVER L: linuxppc-dev@lists.ozlabs.org S: Odd Fixes diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index fb31a7f649a3..80a6e2dcd085 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig @@ -274,4 +274,6 @@ source "drivers/mtd/spi-nor/Kconfig" source "drivers/mtd/ubi/Kconfig" +source "drivers/mtd/hyperbus/Kconfig" + endif # MTD diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile index 806287e80e84..62d649a959e2 100644 --- a/drivers/mtd/Makefile +++ b/drivers/mtd/Makefile @@ -34,3 +34,4 @@ obj-y += chips/ lpddr/ maps/ devices/ nand/ tests/ obj-$(CONFIG_MTD_SPI_NOR) += spi-nor/ obj-$(CONFIG_MTD_UBI) += ubi/ +obj-$(CONFIG_MTD_HYPERBUS) += hyperbus/ diff --git a/drivers/mtd/hyperbus/Kconfig b/drivers/mtd/hyperbus/Kconfig new file mode 100644 index 000000000000..98147e28caa0 --- /dev/null +++ b/drivers/mtd/hyperbus/Kconfig @@ -0,0 +1,11 @@ +menuconfig MTD_HYPERBUS + tristate "HyperBus support" + select MTD_CFI + select MTD_MAP_BANK_WIDTH_2 + select MTD_CFI_AMDSTD + select MTD_COMPLEX_MAPPINGS + help + This is the framework for the HyperBus which can be used by + the HyperBus Controller driver to communicate with + HyperFlash. 
See Cypress HyperBus specification for more + details diff --git a/drivers/mtd/hyperbus/Makefile b/drivers/mtd/hyperbus/Makefile new file mode 100644 index 000000000000..ca61dedd730d --- /dev/null +++ b/drivers/mtd/hyperbus/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_MTD_HYPERBUS) += hyperbus-core.o diff --git a/drivers/mtd/hyperbus/hyperbus-core.c b/drivers/mtd/hyperbus/hyperbus-core.c new file mode 100644 index 000000000000..6af9ea34117d --- /dev/null +++ b/drivers/mtd/hyperbus/hyperbus-core.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ +// Author: Vignesh Raghavendra + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct hyperbus_device *map_to_hbdev(struct map_info *map) +{ + return container_of(map, struct hyperbus_device, map); +} + +static map_word hyperbus_read16(struct map_info *map, unsigned long addr) +{ + struct hyperbus_device *hbdev = map_to_hbdev(map); + struct hyperbus_ctlr *ctlr = hbdev->ctlr; + map_word read_data; + + read_data.x[0] = ctlr->ops->read16(hbdev, addr); + + return read_data; +} + +static void hyperbus_write16(struct map_info *map, map_word d, + unsigned long addr) +{ + struct hyperbus_device *hbdev = map_to_hbdev(map); + struct hyperbus_ctlr *ctlr = hbdev->ctlr; + + ctlr->ops->write16(hbdev, addr, d.x[0]); +} + +static void hyperbus_copy_from(struct map_info *map, void *to, + unsigned long from, ssize_t len) +{ + struct hyperbus_device *hbdev = map_to_hbdev(map); + struct hyperbus_ctlr *ctlr = hbdev->ctlr; + + ctlr->ops->copy_from(hbdev, to, from, len); +} + +static void hyperbus_copy_to(struct map_info *map, unsigned long to, + const void *from, ssize_t len) +{ + struct hyperbus_device *hbdev = map_to_hbdev(map); + struct hyperbus_ctlr *ctlr = hbdev->ctlr; + + ctlr->ops->copy_to(hbdev, to, from, len); +} + +int hyperbus_register_device(struct hyperbus_device *hbdev) +{ + const struct hyperbus_ops *ops; + struct hyperbus_ctlr *ctlr; + struct device_node *np; + struct map_info *map; + struct resource res; + struct device *dev; + int ret; + + if (!hbdev || !hbdev->np || !hbdev->ctlr || !hbdev->ctlr->dev) { + pr_err("hyperbus: please fill all the necessary fields!\n"); + return -EINVAL; + } + + np = hbdev->np; + ctlr = hbdev->ctlr; + if (!of_device_is_compatible(np, "cypress,hyperflash")) + return -ENODEV; + + hbdev->memtype = HYPERFLASH; + + ret = of_address_to_resource(np, 0, &res); + if (ret) + return ret; + + dev = ctlr->dev; + map = &hbdev->map; + map->size = resource_size(&res); + map->virt = devm_ioremap_resource(dev, &res); + if (IS_ERR(map->virt)) + return PTR_ERR(map->virt); + + map->name = dev_name(dev); + map->bankwidth = 2; + map->device_node = np; + + simple_map_init(map); + ops = ctlr->ops; + if (ops) { + if (ops->read16) + map->read = hyperbus_read16; + if (ops->write16) + map->write = hyperbus_write16; + if (ops->copy_to) + map->copy_to = hyperbus_copy_to; + if (ops->copy_from) + map->copy_from = hyperbus_copy_from; + + if (ops->calibrate && !ctlr->calibrated) { + ret = ops->calibrate(hbdev); + if (!ret) { + dev_err(dev, "Calibration failed\n"); + return -ENODEV; + } + ctlr->calibrated = true; + } + } + + hbdev->mtd = do_map_probe("cfi_probe", map); + if (!hbdev->mtd) { + dev_err(dev, "probing of hyperbus device failed\n"); + return -ENODEV; + } + + hbdev->mtd->dev.parent = dev; + mtd_set_of_node(hbdev->mtd, np); + + ret = mtd_device_register(hbdev->mtd, NULL, 0); + if 
(ret) { + dev_err(dev, "failed to register mtd device\n"); + map_destroy(hbdev->mtd); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(hyperbus_register_device); + +int hyperbus_unregister_device(struct hyperbus_device *hbdev) +{ + int ret = 0; + + if (hbdev && hbdev->mtd) { + ret = mtd_device_unregister(hbdev->mtd); + map_destroy(hbdev->mtd); + } + + return ret; +} +EXPORT_SYMBOL_GPL(hyperbus_unregister_device); + +MODULE_DESCRIPTION("HyperBus Framework"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Vignesh Raghavendra "); diff --git a/include/linux/mtd/hyperbus.h b/include/linux/mtd/hyperbus.h new file mode 100644 index 000000000000..2dfe65964f6e --- /dev/null +++ b/include/linux/mtd/hyperbus.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + */ + +#ifndef __LINUX_MTD_HYPERBUS_H__ +#define __LINUX_MTD_HYPERBUS_H__ + +#include + +enum hyperbus_memtype { + HYPERFLASH, + HYPERRAM, +}; + +/** + * struct hyperbus_device - struct representing HyperBus slave device + * @map: map_info struct for accessing MMIO HyperBus flash memory + * @np: pointer to HyperBus slave device node + * @mtd: pointer to MTD struct + * @ctlr: pointer to HyperBus controller struct + * @memtype: type of memory device: HyperFlash or HyperRAM + */ + +struct hyperbus_device { + struct map_info map; + struct device_node *np; + struct mtd_info *mtd; + struct hyperbus_ctlr *ctlr; + enum hyperbus_memtype memtype; +}; + +/** + * struct hyperbus_ops - struct representing custom HyperBus operations + * @read16: read 16 bit of data from flash in a single burst. Used to read + * from non default address space, such as ID/CFI space + * @write16: write 16 bit of data to flash in a single burst. Used to + * send cmd to flash or write single 16 bit word at a time. + * @copy_from: copy data from flash memory + * @copy_to: copy data to flash memory + * @calibrate: calibrate HyperBus controller + */ + +struct hyperbus_ops { + u16 (*read16)(struct hyperbus_device *hbdev, unsigned long addr); + void (*write16)(struct hyperbus_device *hbdev, + unsigned long addr, u16 val); + void (*copy_from)(struct hyperbus_device *hbdev, void *to, + unsigned long from, ssize_t len); + void (*copy_to)(struct hyperbus_device *dev, unsigned long to, + const void *from, ssize_t len); + int (*calibrate)(struct hyperbus_device *dev); +}; + +/** + * struct hyperbus_ctlr - struct representing HyperBus controller + * @dev: pointer to HyperBus controller device + * @calibrated: flag to indicate ctlr calibration sequence is complete + * @ops: HyperBus controller ops + */ +struct hyperbus_ctlr { + struct device *dev; + bool calibrated; + + const struct hyperbus_ops *ops; +}; + +/** + * hyperbus_register_device - probe and register a HyperBus slave memory device + * @hbdev: hyperbus_device struct with dev, np and ctlr field populated + * + * Return: 0 for success, others for failure. + */ +int hyperbus_register_device(struct hyperbus_device *hbdev); + +/** + * hyperbus_unregister_device - deregister HyperBus slave memory device + * @hbdev: hyperbus_device to be unregistered + * + * Return: 0 for success, others for failure. 
+ */ +int hyperbus_unregister_device(struct hyperbus_device *hbdev); + +#endif /* __LINUX_MTD_HYPERBUS_H__ */ From d7865933af9ed38f39cf085e1def44b637109999 Mon Sep 17 00:00:00 2001 From: Vignesh Raghavendra Date: Tue, 25 Jun 2019 13:27:45 +0530 Subject: [PATCH 15/60] dt-bindings: mtd: Add bindings for TI's AM654 HyperBus memory controller Add binding documentation for TI's HyperBus memory controller present on AM654 SoC. Signed-off-by: Vignesh Raghavendra Signed-off-by: Miquel Raynal --- .../devicetree/bindings/mtd/ti,am654-hbmc.txt | 51 +++++++++++++++++++ MAINTAINERS | 1 + 2 files changed, 52 insertions(+) create mode 100644 Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt diff --git a/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt b/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt new file mode 100644 index 000000000000..faa81c2e5da6 --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt @@ -0,0 +1,51 @@ +Bindings for HyperBus Memory Controller (HBMC) on TI's K3 family of SoCs + +Required properties: +- compatible : "ti,am654-hbmc" for AM654 SoC +- reg : Two entries: + First entry pointed to the register space of HBMC controller + Second entry pointing to the memory map region dedicated for + MMIO access to attached flash devices +- ranges : Address translation from offset within CS to allocated MMIO + space in SoC + +Optional properties: +- mux-controls : phandle to the multiplexer that controls selection of + HBMC vs OSPI inside Flash SubSystem (FSS). Default is OSPI, + if property is absent. + See Documentation/devicetree/bindings/mux/reg-mux.txt + for mmio-mux binding details + +Example: + + system-controller@47000000 { + compatible = "syscon", "simple-mfd"; + reg = <0x0 0x47000000 0x0 0x100>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + hbmc_mux: multiplexer { + compatible = "mmio-mux"; + #mux-control-cells = <1>; + mux-reg-masks = <0x4 0x2>; /* 0: reg 0x4, bit 1 */ + }; + }; + + hbmc: hyperbus@47034000 { + compatible = "ti,am654-hbmc"; + reg = <0x0 0x47034000 0x0 0x100>, + <0x5 0x00000000 0x1 0x0000000>; + power-domains = <&k3_pds 55>; + #address-cells = <2>; + #size-cells = <1>; + ranges = <0x0 0x0 0x5 0x00000000 0x4000000>, /* CS0 - 64MB */ + <0x1 0x0 0x5 0x04000000 0x4000000>; /* CS1 - 64MB */ + mux-controls = <&hbmc_mux 0>; + + /* Slave flash node */ + flash@0,0 { + compatible = "cypress,hyperflash", "cfi-flash"; + reg = <0x0 0x0 0x4000000>; + }; + }; diff --git a/MAINTAINERS b/MAINTAINERS index f1253adb8cf6..b3eb96149591 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7311,6 +7311,7 @@ S: Supported F: drivers/mtd/hyperbus/ F: include/linux/mtd/hyperbus.h F: Documentation/devicetree/bindings/mtd/cypress,hyperflash.txt +F: Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt HYPERVISOR VIRTUAL CONSOLE DRIVER L: linuxppc-dev@lists.ozlabs.org From 2099920ebeca38584d645969cb3a8698ca6c3450 Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Fri, 19 Apr 2019 09:47:17 +0200 Subject: [PATCH 16/60] mtd: rawnand: use longest matching pattern Sometimes the exec_op parser does not choose the optimal pattern if multiple patterns with optional elements are available. Since the stack automatically splits operations in multiple exec_op calls, a non-optimal pattern gets broken up into multiple calls. E.g. 
an OOB read using the vf610 driver: nand: executing subop: nand: ->CMD [0x00] nand: ->ADDR [5 cyc: 00 08 ea 94 02] nand: ->CMD [0x30] nand: ->WAITRDY [max 200000 ms] nand: DATA_IN [64 B] nand: executing subop: nand: CMD [0x00] nand: ADDR [5 cyc: 00 08 ea 94 02] nand: CMD [0x30] nand: WAITRDY [max 200000 ms] nand: ->DATA_IN [64 B] However, the vf610 driver has a pattern which can execute the complete command in a single go... This patch makes sure that the longest matching pattern is chosen instead of the first (potentially only partial) match. With this change the vf610 reads the OOB in a single exec_op call: nand: executing subop: nand: ->CMD [0x00] nand: ->ADDR [5 cyc: 00 08 c0 1d 00] nand: ->CMD [0x30] nand: ->WAITRDY [max 200000 ms] nand: ->DATA_IN [64 B] Reported-by: Sascha Hauer Suggested-by: Boris Brezillon Tested-by: Stefan Agner Signed-off-by: Stefan Agner Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/nand_base.c | 50 +++++++++++++++++++++++--------- 1 file changed, 37 insertions(+), 13 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 2cf71060d6f8..d565b4a9dce1 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -2156,6 +2156,22 @@ static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx) } #endif +static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a, + const struct nand_op_parser_ctx *b) +{ + if (a->subop.ninstrs < b->subop.ninstrs) + return -1; + else if (a->subop.ninstrs > b->subop.ninstrs) + return 1; + + if (a->subop.last_instr_end_off < b->subop.last_instr_end_off) + return -1; + else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off) + return 1; + + return 0; +} + /** * nand_op_parser_exec_op - exec_op parser * @chip: the NAND chip @@ -2190,30 +2206,38 @@ int nand_op_parser_exec_op(struct nand_chip *chip, unsigned int i; while (ctx.subop.instrs < op->instrs + op->ninstrs) { - int ret; + const struct nand_op_parser_pattern *pattern; + struct nand_op_parser_ctx best_ctx; + int ret, best_pattern = -1; for (i = 0; i < parser->npatterns; i++) { - const struct nand_op_parser_pattern *pattern; + struct nand_op_parser_ctx test_ctx = ctx; pattern = &parser->patterns[i]; - if (!nand_op_parser_match_pat(pattern, &ctx)) + if (!nand_op_parser_match_pat(pattern, &test_ctx)) continue; - nand_op_parser_trace(&ctx); + if (best_pattern >= 0 && + nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0) + continue; - if (check_only) - break; + best_pattern = i; + best_ctx = test_ctx; + } + if (best_pattern < 0) { + pr_debug("->exec_op() parser: pattern not found!\n"); + return -ENOTSUPP; + } + + ctx = best_ctx; + nand_op_parser_trace(&ctx); + + if (!check_only) { + pattern = &parser->patterns[best_pattern]; ret = pattern->exec(chip, &ctx.subop); if (ret) return ret; - - break; - } - - if (i == parser->npatterns) { - pr_debug("->exec_op() parser: pattern not found!\n"); - return -ENOTSUPP; } /* From e42039452bc73d8d80df19d3b60a43eac2ad3b8b Mon Sep 17 00:00:00 2001 From: Fabien Dessenne Date: Wed, 24 Apr 2019 16:49:06 +0200 Subject: [PATCH 17/60] mtd: rawnand: stm32_fmc2: manage the get_irq error case During probe, check the "get_irq" error value. 
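For reference, a minimal sketch of the idiom this change follows (the handler, private data and message text below are placeholders, not the driver's exact code):

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		/*
		 * -EPROBE_DEFER only means the interrupt provider is not
		 * ready yet; stay silent and let the driver core retry.
		 */
		if (irq != -EPROBE_DEFER)
			dev_err(dev, "failed to get IRQ: %d\n", irq);
		return irq;
	}

	ret = devm_request_irq(dev, irq, handler_fn, 0, dev_name(dev), priv);
	if (ret)
		return ret;

Without the check, a negative value is handed straight to devm_request_irq(), which then typically fails with -EINVAL instead of propagating -EPROBE_DEFER, so probing fails permanently rather than being retried.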
Signed-off-by: Fabien Dessenne Acked-by: Christophe Kerello Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/stm32_fmc2_nand.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 999ca6a66036..4aabea25cd7d 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -1909,6 +1909,12 @@ static int stm32_fmc2_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); + if (irq < 0) { + if (irq != -EPROBE_DEFER) + dev_err(dev, "IRQ error missing or invalid\n"); + return irq; + } + ret = devm_request_irq(dev, irq, stm32_fmc2_irq, 0, dev_name(dev), fmc2); if (ret) { From 855eff216a97afa4a2233b792cb3c812b5ebd876 Mon Sep 17 00:00:00 2001 From: Jonathan Bakker Date: Fri, 26 Apr 2019 17:06:34 +0200 Subject: [PATCH 18/60] mtd: onenand: Add support for 8Gb datasize onenand MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Used in several S5PV210-based Galaxy S devices, among them SGH-T959V, SGH-T959P, SGH-T839, and SPH-D700. Signed-off-by: Jonathan Bakker Signed-off-by: Paweł Chmiel Signed-off-by: Miquel Raynal --- drivers/mtd/nand/onenand/onenand_base.c | 2 ++ include/linux/mtd/onenand_regs.h | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c index f41d76248550..492c0059673d 100644 --- a/drivers/mtd/nand/onenand/onenand_base.c +++ b/drivers/mtd/nand/onenand/onenand_base.c @@ -3260,6 +3260,8 @@ static void onenand_check_features(struct mtd_info *mtd) /* Lock scheme */ switch (density) { + case ONENAND_DEVICE_DENSITY_8Gb: + this->options |= ONENAND_HAS_NOP_1; case ONENAND_DEVICE_DENSITY_4Gb: if (ONENAND_IS_DDP(this)) this->options |= ONENAND_HAS_2PLANE; diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h index d60130f88eed..9640d707cbf8 100644 --- a/include/linux/mtd/onenand_regs.h +++ b/include/linux/mtd/onenand_regs.h @@ -80,6 +80,7 @@ #define ONENAND_DEVICE_DENSITY_1Gb (0x003) #define ONENAND_DEVICE_DENSITY_2Gb (0x004) #define ONENAND_DEVICE_DENSITY_4Gb (0x005) +#define ONENAND_DEVICE_DENSITY_8Gb (0x006) /* * Version ID Register F002h (R) From 917cc5945f18fc9464d597b4c1bb50ac745d78b6 Mon Sep 17 00:00:00 2001 From: Kamal Dasu Date: Wed, 1 May 2019 15:22:14 -0400 Subject: [PATCH 19/60] mtd: rawnand: brcmnand: Fix BCH ECC layout for large page NAND parts The way oobregion->offset is derived for large page NAND parts is wrong, fixes it. 
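To see the effect of the two formulas, here is a standalone illustration with hypothetical numbers (16 spare bytes per 512-byte sector, 7 ECC bytes per sector, four sectors per page):

	#include <stdio.h>

	int main(void)
	{
		int sas = 16, ecc_bytes = 7, section;

		/* compare the old and new oobregion->offset derivations */
		for (section = 0; section < 4; section++)
			printf("section %d: old=%3d new=%3d\n", section,
			       section * (sas + 1) - ecc_bytes,
			       (section + 1) * sas - ecc_bytes);
		return 0;
	}

The old expression gives -7, 10, 27, 44: section 0 is negative and each later section drifts one byte further into the wrong spare region. The new expression gives 9, 25, 41, 57, i.e. the last 7 bytes of every 16-byte per-sector spare area, which matches the intended per-sector layout.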
Fixes: ef5eeea6e911 ("mtd: nand: brcm: switch to mtd_ooblayout_ops") Signed-off-by: Kamal Dasu Reviewed-by: Florian Fainelli Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 873527753f52..f368d1cb89f4 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -931,7 +931,7 @@ static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section, if (section >= sectors) return -ERANGE; - oobregion->offset = (section * (sas + 1)) - chip->ecc.bytes; + oobregion->offset = ((section + 1) * sas) - chip->ecc.bytes; oobregion->length = chip->ecc.bytes; return 0; From e1884ffddacc0424d7e785e6f8087bd12f7196db Mon Sep 17 00:00:00 2001 From: Xiaolei Li Date: Tue, 7 May 2019 18:25:38 +0800 Subject: [PATCH 20/60] mtd: rawnand: mtk: Correct low level time calculation of r/w cycle At present, the flow of calculating AC timing of read/write cycle in SDR mode is that: At first, calculate high hold time which is valid for both read and write cycle using the max value between tREH_min and tWH_min. Secondly, calculate WE# pulse width using tWP_min. Thridly, calculate RE# pulse width using the bigger one between tREA_max and tRP_min. But NAND SPEC shows that Controller should also meet write/read cycle time. That is write cycle time should be more than tWC_min and read cycle should be more than tRC_min. Obviously, we do not achieve that now. This patch corrects the low level time calculation to meet minimum read/write cycle time required. After getting the high hold time, WE# low level time will be promised to meet tWP_min and tWC_min requirement, and RE# low level time will be promised to meet tREA_max, tRP_min and tRC_min requirement. Fixes: edfee3619c49 ("mtd: nand: mtk: add ->setup_data_interface() hook") Cc: stable@vger.kernel.org # v4.17+ Signed-off-by: Xiaolei Li Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/mtk_nand.c | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index dceff28c9a31..23fe19397315 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -500,7 +500,8 @@ static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline, { struct mtk_nfc *nfc = nand_get_controller_data(chip); const struct nand_sdr_timings *timings; - u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt; + u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst = 0, trlt = 0; + u32 thold; timings = nand_get_sdr_timings(conf); if (IS_ERR(timings)) @@ -536,11 +537,28 @@ static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline, twh = DIV_ROUND_UP(twh * rate, 1000000) - 1; twh &= 0xf; - twst = timings->tWP_min / 1000; + /* Calculate real WE#/RE# hold time in nanosecond */ + thold = (twh + 1) * 1000000 / rate; + /* nanosecond to picosecond */ + thold *= 1000; + + /* + * WE# low level time should be expaned to meet WE# pulse time + * and WE# cycle time at the same time. 
+ */ + if (thold < timings->tWC_min) + twst = timings->tWC_min - thold; + twst = max(timings->tWP_min, twst) / 1000; twst = DIV_ROUND_UP(twst * rate, 1000000) - 1; twst &= 0xf; - trlt = max(timings->tREA_max, timings->tRP_min) / 1000; + /* + * RE# low level time should be expaned to meet RE# pulse time, + * RE# access time and RE# cycle time at the same time. + */ + if (thold < timings->tRC_min) + trlt = timings->tRC_min - thold; + trlt = max3(trlt, timings->tREA_max, timings->tRP_min) / 1000; trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1; trlt &= 0xf; From 42d13a09efa4f0ea33231f3b145356ee487987b3 Mon Sep 17 00:00:00 2001 From: Xiaolei Li Date: Tue, 7 May 2019 18:25:39 +0800 Subject: [PATCH 21/60] mtd: rawnand: mtk: Improve data sampling timing for read cycle Currently, we expand RE# low level time by choosing the max value between RE# pulse width and RE# access time, and sample data at the rising edge of RE#. Then, if RE# access time is bigger than RE# pulse width, the real read cycle time may be more than NAND SPEC required. This makes read performance be worse than that expected. This patch improves data sampling timing by calculating RE# low level time according to RE# pulse width. If RE# access time is bigger than RE# pulse width, then delay sampling data timing. The result of contrast test base on MT2712 evaluat board is as follow. nand: Micron MT29F16G08ADBCAH4 nand: 2048 MiB, SLC, erase size: 256 KiB, page size: 4096, OOB size: 224 NFI 2x clock rate: 124800000 HZ. Read speed without this patch: mtd_speedtest: page read speed is 14012 KiB/s mtd_speedtest: 2 page read speed is 14860 KiB/s Read speed with this patch: mtd_speedtest: page read speed is 18724 KiB/s mtd_speedtest: 2 page read speed is 18713 KiB/s Signed-off-by: Xiaolei Li Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/mtk_nand.c | 46 ++++++++++++++++++++++++++------- 1 file changed, 36 insertions(+), 10 deletions(-) diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index 23fe19397315..03aff7e9bea5 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -79,6 +79,10 @@ #define NFI_FDMM(x) (0xA4 + (x) * sizeof(u32) * 2) #define NFI_FDM_MAX_SIZE (8) #define NFI_FDM_MIN_SIZE (1) +#define NFI_DEBUG_CON1 (0x220) +#define STROBE_MASK GENMASK(4, 3) +#define STROBE_SHIFT (3) +#define MAX_STROBE_DLY (3) #define NFI_MASTER_STA (0x224) #define MASTER_STA_MASK (0x0FFF) #define NFI_EMPTY_THRESH (0x23C) @@ -501,7 +505,7 @@ static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline, struct mtk_nfc *nfc = nand_get_controller_data(chip); const struct nand_sdr_timings *timings; u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst = 0, trlt = 0; - u32 thold; + u32 temp, tsel = 0; timings = nand_get_sdr_timings(conf); if (IS_ERR(timings)) @@ -538,30 +542,52 @@ static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline, twh &= 0xf; /* Calculate real WE#/RE# hold time in nanosecond */ - thold = (twh + 1) * 1000000 / rate; + temp = (twh + 1) * 1000000 / rate; /* nanosecond to picosecond */ - thold *= 1000; + temp *= 1000; /* * WE# low level time should be expaned to meet WE# pulse time * and WE# cycle time at the same time. 
*/ - if (thold < timings->tWC_min) - twst = timings->tWC_min - thold; + if (temp < timings->tWC_min) + twst = timings->tWC_min - temp; twst = max(timings->tWP_min, twst) / 1000; twst = DIV_ROUND_UP(twst * rate, 1000000) - 1; twst &= 0xf; /* - * RE# low level time should be expaned to meet RE# pulse time, - * RE# access time and RE# cycle time at the same time. + * RE# low level time should be expaned to meet RE# pulse time + * and RE# cycle time at the same time. */ - if (thold < timings->tRC_min) - trlt = timings->tRC_min - thold; - trlt = max3(trlt, timings->tREA_max, timings->tRP_min) / 1000; + if (temp < timings->tRC_min) + trlt = timings->tRC_min - temp; + trlt = max(trlt, timings->tRP_min) / 1000; trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1; trlt &= 0xf; + /* Calculate RE# pulse time in nanosecond. */ + temp = (trlt + 1) * 1000000 / rate; + /* nanosecond to picosecond */ + temp *= 1000; + /* + * If RE# access time is bigger than RE# pulse time, + * delay sampling data timing. + */ + if (temp < timings->tREA_max) { + tsel = timings->tREA_max / 1000; + tsel = DIV_ROUND_UP(tsel * rate, 1000000); + tsel -= (trlt + 1); + if (tsel > MAX_STROBE_DLY) { + trlt += tsel - MAX_STROBE_DLY; + tsel = MAX_STROBE_DLY; + } + } + temp = nfi_readl(nfc, NFI_DEBUG_CON1); + temp &= ~STROBE_MASK; + temp |= tsel << STROBE_SHIFT; + nfi_writel(nfc, temp, NFI_DEBUG_CON1); + /* * ACCON: access timing control register * ------------------------------------- From 8dbd7b103fc3eac53ab8a20b3cd1a5cb26c0dcbb Mon Sep 17 00:00:00 2001 From: Xiaolei Li Date: Tue, 7 May 2019 18:25:40 +0800 Subject: [PATCH 22/60] mtd: rawnand: mtk: Add validity check for CE# pin setting Currently, we only check how many CE# pins are set in device tree. But it should be necessary to check whether CE# pin setting is duplicated or if CE# pin index exceeds the maximum CE# number that controller supports. So, add validity check to avoid these invalid settings. Signed-off-by: Xiaolei Li Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/mtk_nand.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index 03aff7e9bea5..d3f00d628fa6 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -154,6 +154,8 @@ struct mtk_nfc { struct list_head chips; u8 *buffer; + + unsigned long assigned_cs; }; /* @@ -1359,6 +1361,17 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, dev_err(dev, "reg property failure : %d\n", ret); return ret; } + + if (tmp >= MTK_NAND_MAX_NSELS) { + dev_err(dev, "invalid CS: %u\n", tmp); + return -EINVAL; + } + + if (test_and_set_bit(tmp, &nfc->assigned_cs)) { + dev_err(dev, "CS %u already assigned\n", tmp); + return -EINVAL; + } + chip->sels[i] = tmp; } From 336d4b138be2dad372b67a2388e42805c48aaa38 Mon Sep 17 00:00:00 2001 From: Xiaolei Li Date: Tue, 7 May 2019 18:25:41 +0800 Subject: [PATCH 23/60] mtd: rawnand: mtk: Fix wrongly assigned OOB buffer pointer issue One main goal of the function mtk_nfc_update_ecc_stats is to check whether sectors are all empty. If they are empty, set these sectors's data buffer and OOB buffer as 0xff. But now, the sector OOB buffer pointer is wrongly assigned. We always do memset from sector 0. To fix this issue, pass start sector number to make OOB buffer pointer be properly assigned. 
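For clarity, a condensed sketch of the corrected empty-page path (helper and field names as used in the driver; this is not the literal hunk):

	if (nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE) {
		memset(buf, 0xff, sectors * chip->ecc.size);
		/* clear the FDM/OOB areas of the sectors that were actually
		 * read, starting at 'start' instead of at sector 0 */
		for (i = 0; i < sectors; i++)
			memset(oob_ptr(chip, start + i), 0xff, reg_size);
		return 0;
	}

For a subpage read covering, say, sectors 2 and 3 of a four-sector page, the old code cleared the OOB of sectors 0 and 1 and left the OOB of sectors 2 and 3 stale, even though their data had already been reported as 0xff.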
Fixes: 1d6b1e464950 ("mtd: mediatek: driver for MTK Smart Device") Signed-off-by: Xiaolei Li Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/mtk_nand.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index d3f00d628fa6..47291bc53350 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -881,19 +881,21 @@ static int mtk_nfc_write_oob_std(struct nand_chip *chip, int page) return mtk_nfc_write_page_raw(chip, NULL, 1, page); } -static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors) +static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start, + u32 sectors) { struct nand_chip *chip = mtd_to_nand(mtd); struct mtk_nfc *nfc = nand_get_controller_data(chip); struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); struct mtk_ecc_stats stats; + u32 reg_size = mtk_nand->fdm.reg_size; int rc, i; rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE; if (rc) { memset(buf, 0xff, sectors * chip->ecc.size); for (i = 0; i < sectors; i++) - memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size); + memset(oob_ptr(chip, start + i), 0xff, reg_size); return 0; } @@ -913,7 +915,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, u32 spare = mtk_nand->spare_per_sector; u32 column, sectors, start, end, reg; dma_addr_t addr; - int bitflips; + int bitflips = 0; size_t len; u8 *buf; int rc; @@ -980,14 +982,11 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, if (rc < 0) { dev_err(nfc->dev, "subpage done timeout\n"); bitflips = -EIO; - } else { - bitflips = 0; - if (!raw) { - rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE); - bitflips = rc < 0 ? -ETIMEDOUT : - mtk_nfc_update_ecc_stats(mtd, buf, sectors); - mtk_nfc_read_fdm(chip, start, sectors); - } + } else if (!raw) { + rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE); + bitflips = rc < 0 ? -ETIMEDOUT : + mtk_nfc_update_ecc_stats(mtd, buf, start, sectors); + mtk_nfc_read_fdm(chip, start, sectors); } dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE); From 14a82ea7e1682645d942d9fb41fcb6126fd1645e Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 21 May 2019 09:06:30 +0200 Subject: [PATCH 24/60] mtd: rawnand: export NAND operation tracer The NAND core has a NAND operation tracing function, but it can only be used by drivers using the generic option parser from the NAND core. Export the tracing function as a static inline function in rawnand.h so that drivers implementing exec_op directly do not have to write their own operation tracing. Signed-off-by: Sascha Hauer Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/nand_base.c | 30 +------------------------- include/linux/mtd/rawnand.h | 36 ++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 29 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index d565b4a9dce1..6ecd1c496ce3 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -2115,35 +2115,7 @@ static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx) if (instr == &ctx->subop.instrs[0]) prefix = " ->"; - switch (instr->type) { - case NAND_OP_CMD_INSTR: - pr_debug("%sCMD [0x%02x]\n", prefix, - instr->ctx.cmd.opcode); - break; - case NAND_OP_ADDR_INSTR: - pr_debug("%sADDR [%d cyc: %*ph]\n", prefix, - instr->ctx.addr.naddrs, - instr->ctx.addr.naddrs < 64 ? 
- instr->ctx.addr.naddrs : 64, - instr->ctx.addr.addrs); - break; - case NAND_OP_DATA_IN_INSTR: - pr_debug("%sDATA_IN [%d B%s]\n", prefix, - instr->ctx.data.len, - instr->ctx.data.force_8bit ? - ", force 8-bit" : ""); - break; - case NAND_OP_DATA_OUT_INSTR: - pr_debug("%sDATA_OUT [%d B%s]\n", prefix, - instr->ctx.data.len, - instr->ctx.data.force_8bit ? - ", force 8-bit" : ""); - break; - case NAND_OP_WAITRDY_INSTR: - pr_debug("%sWAITRDY [max %d ms]\n", prefix, - instr->ctx.waitrdy.timeout_ms); - break; - } + nand_op_trace(prefix, instr); if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1]) prefix = " "; diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index dbfffa5bec7b..f5bb6f11c36b 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -877,6 +877,42 @@ int nand_op_parser_exec_op(struct nand_chip *chip, const struct nand_op_parser *parser, const struct nand_operation *op, bool check_only); +static inline void nand_op_trace(const char *prefix, + const struct nand_op_instr *instr) +{ +#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG) + switch (instr->type) { + case NAND_OP_CMD_INSTR: + pr_debug("%sCMD [0x%02x]\n", prefix, + instr->ctx.cmd.opcode); + break; + case NAND_OP_ADDR_INSTR: + pr_debug("%sADDR [%d cyc: %*ph]\n", prefix, + instr->ctx.addr.naddrs, + instr->ctx.addr.naddrs < 64 ? + instr->ctx.addr.naddrs : 64, + instr->ctx.addr.addrs); + break; + case NAND_OP_DATA_IN_INSTR: + pr_debug("%sDATA_IN [%d B%s]\n", prefix, + instr->ctx.data.len, + instr->ctx.data.force_8bit ? + ", force 8-bit" : ""); + break; + case NAND_OP_DATA_OUT_INSTR: + pr_debug("%sDATA_OUT [%d B%s]\n", prefix, + instr->ctx.data.len, + instr->ctx.data.force_8bit ? + ", force 8-bit" : ""); + break; + case NAND_OP_WAITRDY_INSTR: + pr_debug("%sWAITRDY [max %d ms]\n", prefix, + instr->ctx.waitrdy.timeout_ms); + break; + } +#endif +} + /** * struct nand_controller_ops - Controller operations * From bf828322282608a03060daba2760a4c00f43ed90 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 21 May 2019 09:06:31 +0200 Subject: [PATCH 25/60] mtd: rawnand: fsmc: Use nand_op_trace for operation tracing Replace the different operation tracing functions with a call to nand_op_trace. Signed-off-by: Sascha Hauer Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/fsmc_nand.c | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index 6c7ca41354be..a6964feeec77 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -613,28 +613,20 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op, for (op_id = 0; op_id < op->ninstrs; op_id++) { instr = &op->instrs[op_id]; + nand_op_trace(" ", instr); + switch (instr->type) { case NAND_OP_CMD_INSTR: - pr_debug(" ->CMD [0x%02x]\n", - instr->ctx.cmd.opcode); - writeb_relaxed(instr->ctx.cmd.opcode, host->cmd_va); break; case NAND_OP_ADDR_INSTR: - pr_debug(" ->ADDR [%d cyc]", - instr->ctx.addr.naddrs); - for (i = 0; i < instr->ctx.addr.naddrs; i++) writeb_relaxed(instr->ctx.addr.addrs[i], host->addr_va); break; case NAND_OP_DATA_IN_INSTR: - pr_debug(" ->DATA_IN [%d B%s]\n", instr->ctx.data.len, - instr->ctx.data.force_8bit ? 
- ", force 8-bit" : ""); - if (host->mode == USE_DMA_ACCESS) fsmc_read_buf_dma(host, instr->ctx.data.buf.in, instr->ctx.data.len); @@ -644,10 +636,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op, break; case NAND_OP_DATA_OUT_INSTR: - pr_debug(" ->DATA_OUT [%d B%s]\n", instr->ctx.data.len, - instr->ctx.data.force_8bit ? - ", force 8-bit" : ""); - if (host->mode == USE_DMA_ACCESS) fsmc_write_buf_dma(host, instr->ctx.data.buf.out, @@ -658,9 +646,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op, break; case NAND_OP_WAITRDY_INSTR: - pr_debug(" ->WAITRDY [max %d ms]\n", - instr->ctx.waitrdy.timeout_ms); - ret = nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms); break; From 3045f8e369634152b5a9389fa11e766ae7696bdf Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 9 Apr 2019 13:34:10 +0200 Subject: [PATCH 26/60] mtd: rawnand: gpmi: move all driver code into single file This moves the whole driver into a single C file. The filename gpmi-lib implies that it implements library functions, but in fact there are several cases where functions in gpmi-lib.c call back into functions in gpmi-nand.c. With this one has to constantly jump between those two files, so moving it into a single file improves readability, even when the file gets quite large. Signed-off-by: Sascha Hauer Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/gpmi-nand/Makefile | 1 - drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c | 934 --------------- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 1247 +++++++++++++++++--- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h | 34 - 4 files changed, 1084 insertions(+), 1132 deletions(-) delete mode 100644 drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c diff --git a/drivers/mtd/nand/raw/gpmi-nand/Makefile b/drivers/mtd/nand/raw/gpmi-nand/Makefile index 30ceee9704d1..9bd81a31e02e 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/Makefile +++ b/drivers/mtd/nand/raw/gpmi-nand/Makefile @@ -1,4 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o gpmi_nand-objs += gpmi-nand.o -gpmi_nand-objs += gpmi-lib.o diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c deleted file mode 100644 index a8b26d2e793c..000000000000 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c +++ /dev/null @@ -1,934 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Freescale GPMI NAND Flash Driver - * - * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. - * Copyright (C) 2008 Embedded Alley Solutions, Inc. - */ -#include -#include -#include - -#include "gpmi-nand.h" -#include "gpmi-regs.h" -#include "bch-regs.h" - -/* Converts time to clock cycles */ -#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period) - -#define MXS_SET_ADDR 0x4 -#define MXS_CLR_ADDR 0x8 -/* - * Clear the bit and poll it cleared. This is usually called with - * a reset address and mask being either SFTRST(bit 31) or CLKGATE - * (bit 30). - */ -static int clear_poll_bit(void __iomem *addr, u32 mask) -{ - int timeout = 0x400; - - /* clear the bit */ - writel(mask, addr + MXS_CLR_ADDR); - - /* - * SFTRST needs 3 GPMI clocks to settle, the reference manual - * recommends to wait 1us. 
- */ - udelay(1); - - /* poll the bit becoming clear */ - while ((readl(addr) & mask) && --timeout) - /* nothing */; - - return !timeout; -} - -#define MODULE_CLKGATE (1 << 30) -#define MODULE_SFTRST (1 << 31) -/* - * The current mxs_reset_block() will do two things: - * [1] enable the module. - * [2] reset the module. - * - * In most of the cases, it's ok. - * But in MX23, there is a hardware bug in the BCH block (see erratum #2847). - * If you try to soft reset the BCH block, it becomes unusable until - * the next hard reset. This case occurs in the NAND boot mode. When the board - * boots by NAND, the ROM of the chip will initialize the BCH blocks itself. - * So If the driver tries to reset the BCH again, the BCH will not work anymore. - * You will see a DMA timeout in this case. The bug has been fixed - * in the following chips, such as MX28. - * - * To avoid this bug, just add a new parameter `just_enable` for - * the mxs_reset_block(), and rewrite it here. - */ -static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable) -{ - int ret; - int timeout = 0x400; - - /* clear and poll SFTRST */ - ret = clear_poll_bit(reset_addr, MODULE_SFTRST); - if (unlikely(ret)) - goto error; - - /* clear CLKGATE */ - writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR); - - if (!just_enable) { - /* set SFTRST to reset the block */ - writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR); - udelay(1); - - /* poll CLKGATE becoming set */ - while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout) - /* nothing */; - if (unlikely(!timeout)) - goto error; - } - - /* clear and poll SFTRST */ - ret = clear_poll_bit(reset_addr, MODULE_SFTRST); - if (unlikely(ret)) - goto error; - - /* clear and poll CLKGATE */ - ret = clear_poll_bit(reset_addr, MODULE_CLKGATE); - if (unlikely(ret)) - goto error; - - return 0; - -error: - pr_err("%s(%p): module reset timeout\n", __func__, reset_addr); - return -ETIMEDOUT; -} - -static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v) -{ - struct clk *clk; - int ret; - int i; - - for (i = 0; i < GPMI_CLK_MAX; i++) { - clk = this->resources.clock[i]; - if (!clk) - break; - - if (v) { - ret = clk_prepare_enable(clk); - if (ret) - goto err_clk; - } else { - clk_disable_unprepare(clk); - } - } - return 0; - -err_clk: - for (; i > 0; i--) - clk_disable_unprepare(this->resources.clock[i - 1]); - return ret; -} - -int gpmi_enable_clk(struct gpmi_nand_data *this) -{ - return __gpmi_enable_clk(this, true); -} - -int gpmi_disable_clk(struct gpmi_nand_data *this) -{ - return __gpmi_enable_clk(this, false); -} - -int gpmi_init(struct gpmi_nand_data *this) -{ - struct resources *r = &this->resources; - int ret; - - ret = gpmi_enable_clk(this); - if (ret) - return ret; - ret = gpmi_reset_block(r->gpmi_regs, false); - if (ret) - goto err_out; - - /* - * Reset BCH here, too. We got failures otherwise :( - * See later BCH reset for explanation of MX23 and MX28 handling - */ - ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this)); - if (ret) - goto err_out; - - /* Choose NAND mode. */ - writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR); - - /* Set the IRQ polarity. */ - writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY, - r->gpmi_regs + HW_GPMI_CTRL1_SET); - - /* Disable Write-Protection. */ - writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET); - - /* Select BCH ECC. */ - writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET); - - /* - * Decouple the chip select from dma channel. We use dma0 for all - * the chips. 
- */ - writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET); - - gpmi_disable_clk(this); - return 0; -err_out: - gpmi_disable_clk(this); - return ret; -} - -/* This function is very useful. It is called only when the bug occur. */ -void gpmi_dump_info(struct gpmi_nand_data *this) -{ - struct resources *r = &this->resources; - struct bch_geometry *geo = &this->bch_geometry; - u32 reg; - int i; - - dev_err(this->dev, "Show GPMI registers :\n"); - for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) { - reg = readl(r->gpmi_regs + i * 0x10); - dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg); - } - - /* start to print out the BCH info */ - dev_err(this->dev, "Show BCH registers :\n"); - for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) { - reg = readl(r->bch_regs + i * 0x10); - dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg); - } - dev_err(this->dev, "BCH Geometry :\n" - "GF length : %u\n" - "ECC Strength : %u\n" - "Page Size in Bytes : %u\n" - "Metadata Size in Bytes : %u\n" - "ECC Chunk Size in Bytes: %u\n" - "ECC Chunk Count : %u\n" - "Payload Size in Bytes : %u\n" - "Auxiliary Size in Bytes: %u\n" - "Auxiliary Status Offset: %u\n" - "Block Mark Byte Offset : %u\n" - "Block Mark Bit Offset : %u\n", - geo->gf_len, - geo->ecc_strength, - geo->page_size, - geo->metadata_size, - geo->ecc_chunk_size, - geo->ecc_chunk_count, - geo->payload_size, - geo->auxiliary_size, - geo->auxiliary_status_offset, - geo->block_mark_byte_offset, - geo->block_mark_bit_offset); -} - -/* Configures the geometry for BCH. */ -int bch_set_geometry(struct gpmi_nand_data *this) -{ - struct resources *r = &this->resources; - struct bch_geometry *bch_geo = &this->bch_geometry; - unsigned int block_count; - unsigned int block_size; - unsigned int metadata_size; - unsigned int ecc_strength; - unsigned int page_size; - unsigned int gf_len; - int ret; - - ret = common_nfc_set_geometry(this); - if (ret) - return ret; - - block_count = bch_geo->ecc_chunk_count - 1; - block_size = bch_geo->ecc_chunk_size; - metadata_size = bch_geo->metadata_size; - ecc_strength = bch_geo->ecc_strength >> 1; - page_size = bch_geo->page_size; - gf_len = bch_geo->gf_len; - - ret = gpmi_enable_clk(this); - if (ret) - return ret; - - /* - * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this - * chip, otherwise it will lock up. So we skip resetting BCH on the MX23. - * and MX28. - */ - ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this)); - if (ret) - goto err_out; - - /* Configure layout 0. */ - writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count) - | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size) - | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) - | BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) - | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this), - r->bch_regs + HW_BCH_FLASH0LAYOUT0); - - writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) - | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) - | BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) - | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this), - r->bch_regs + HW_BCH_FLASH0LAYOUT1); - - /* Set *all* chip selects to use layout 0. */ - writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT); - - /* Enable interrupts. */ - writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, - r->bch_regs + HW_BCH_CTRL_SET); - - gpmi_disable_clk(this); - return 0; -err_out: - gpmi_disable_clk(this); - return ret; -} - -/* - * <1> Firstly, we should know what's the GPMI-clock means. - * The GPMI-clock is the internal clock in the gpmi nand controller. 
- * If you set 100MHz to gpmi nand controller, the GPMI-clock's period - * is 10ns. Mark the GPMI-clock's period as GPMI-clock-period. - * - * <2> Secondly, we should know what's the frequency on the nand chip pins. - * The frequency on the nand chip pins is derived from the GPMI-clock. - * We can get it from the following equation: - * - * F = G / (DS + DH) - * - * F : the frequency on the nand chip pins. - * G : the GPMI clock, such as 100MHz. - * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP - * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD - * - * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz, - * the nand EDO(extended Data Out) timing could be applied. - * The GPMI implements a feedback read strobe to sample the read data. - * The feedback read strobe can be delayed to support the nand EDO timing - * where the read strobe may deasserts before the read data is valid, and - * read data is valid for some time after read strobe. - * - * The following figure illustrates some aspects of a NAND Flash read: - * - * |<---tREA---->| - * | | - * | | | - * |<--tRP-->| | - * | | | - * __ ___|__________________________________ - * RDN \________/ | - * | - * /---------\ - * Read Data --------------< >--------- - * \---------/ - * | | - * |<-D->| - * FeedbackRDN ________ ____________ - * \___________/ - * - * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY. - * - * - * <4> Now, we begin to describe how to compute the right RDN_DELAY. - * - * 4.1) From the aspect of the nand chip pins: - * Delay = (tREA + C - tRP) {1} - * - * tREA : the maximum read access time. - * C : a constant to adjust the delay. default is 4000ps. - * tRP : the read pulse width, which is exactly: - * tRP = (GPMI-clock-period) * DATA_SETUP - * - * 4.2) From the aspect of the GPMI nand controller: - * Delay = RDN_DELAY * 0.125 * RP {2} - * - * RP : the DLL reference period. - * if (GPMI-clock-period > DLL_THRETHOLD) - * RP = GPMI-clock-period / 2; - * else - * RP = GPMI-clock-period; - * - * Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period - * is greater DLL_THRETHOLD. In other SOCs, the DLL_THRETHOLD - * is 16000ps, but in mx6q, we use 12000ps. 
- * - * 4.3) since {1} equals {2}, we get: - * - * (tREA + 4000 - tRP) * 8 - * RDN_DELAY = ----------------------- {3} - * RP - */ -static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this, - const struct nand_sdr_timings *sdr) -{ - struct gpmi_nfc_hardware_timing *hw = &this->hw; - unsigned int dll_threshold_ps = this->devdata->max_chain_delay; - unsigned int period_ps, reference_period_ps; - unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles; - unsigned int tRP_ps; - bool use_half_period; - int sample_delay_ps, sample_delay_factor; - u16 busy_timeout_cycles; - u8 wrn_dly_sel; - - if (sdr->tRC_min >= 30000) { - /* ONFI non-EDO modes [0-3] */ - hw->clk_rate = 22000000; - wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS; - } else if (sdr->tRC_min >= 25000) { - /* ONFI EDO mode 4 */ - hw->clk_rate = 80000000; - wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY; - } else { - /* ONFI EDO mode 5 */ - hw->clk_rate = 100000000; - wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY; - } - - /* SDR core timings are given in picoseconds */ - period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate); - - addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps); - data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps); - data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps); - busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps); - - hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) | - BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) | - BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles); - hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096); - - /* - * Derive NFC ideal delay from {3}: - * - * (tREA + 4000 - tRP) * 8 - * RDN_DELAY = ----------------------- - * RP - */ - if (period_ps > dll_threshold_ps) { - use_half_period = true; - reference_period_ps = period_ps / 2; - } else { - use_half_period = false; - reference_period_ps = period_ps; - } - - tRP_ps = data_setup_cycles * period_ps; - sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8; - if (sample_delay_ps > 0) - sample_delay_factor = sample_delay_ps / reference_period_ps; - else - sample_delay_factor = 0; - - hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel); - if (sample_delay_factor) - hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) | - BM_GPMI_CTRL1_DLL_ENABLE | - (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0); -} - -void gpmi_nfc_apply_timings(struct gpmi_nand_data *this) -{ - struct gpmi_nfc_hardware_timing *hw = &this->hw; - struct resources *r = &this->resources; - void __iomem *gpmi_regs = r->gpmi_regs; - unsigned int dll_wait_time_us; - - clk_set_rate(r->clock[0], hw->clk_rate); - - writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0); - writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1); - - /* - * Clear several CTRL1 fields, DLL must be disabled when setting - * RDN_DELAY or HALF_PERIOD. - */ - writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR); - writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET); - - /* Wait 64 clock cycles before using the GPMI after enabling the DLL */ - dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64; - if (!dll_wait_time_us) - dll_wait_time_us = 1; - - /* Wait for the DLL to settle. 
*/ - udelay(dll_wait_time_us); -} - -int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) -{ - struct gpmi_nand_data *this = nand_get_controller_data(chip); - const struct nand_sdr_timings *sdr; - - /* Retrieve required NAND timings */ - sdr = nand_get_sdr_timings(conf); - if (IS_ERR(sdr)) - return PTR_ERR(sdr); - - /* Only MX6 GPMI controller can reach EDO timings */ - if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this)) - return -ENOTSUPP; - - /* Stop here if this call was just a check */ - if (chipnr < 0) - return 0; - - /* Do the actual derivation of the controller timings */ - gpmi_nfc_compute_timings(this, sdr); - - this->hw.must_apply_timings = true; - - return 0; -} - -/* Clears a BCH interrupt. */ -void gpmi_clear_bch(struct gpmi_nand_data *this) -{ - struct resources *r = &this->resources; - writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR); -} - -/* Returns the Ready/Busy status of the given chip. */ -int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip) -{ - struct resources *r = &this->resources; - uint32_t mask = 0; - uint32_t reg = 0; - - if (GPMI_IS_MX23(this)) { - mask = MX23_BM_GPMI_DEBUG_READY0 << chip; - reg = readl(r->gpmi_regs + HW_GPMI_DEBUG); - } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6(this)) { - /* - * In the imx6, all the ready/busy pins are bound - * together. So we only need to check chip 0. - */ - if (GPMI_IS_MX6(this)) - chip = 0; - - /* MX28 shares the same R/B register as MX6Q. */ - mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip); - reg = readl(r->gpmi_regs + HW_GPMI_STAT); - } else - dev_err(this->dev, "unknown arch.\n"); - return reg & mask; -} - -int gpmi_send_command(struct gpmi_nand_data *this) -{ - struct dma_chan *channel = get_dma_chan(this); - struct dma_async_tx_descriptor *desc; - struct scatterlist *sgl; - int chip = this->current_chip; - int ret; - u32 pio[3]; - - /* [1] send out the PIO words */ - pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE) - | BM_GPMI_CTRL0_WORD_LENGTH - | BF_GPMI_CTRL0_CS(chip, this) - | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) - | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE) - | BM_GPMI_CTRL0_ADDRESS_INCREMENT - | BF_GPMI_CTRL0_XFER_COUNT(this->command_length); - pio[1] = pio[2] = 0; - desc = dmaengine_prep_slave_sg(channel, - (struct scatterlist *)pio, - ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); - if (!desc) - return -EINVAL; - - /* [2] send out the COMMAND + ADDRESS string stored in @buffer */ - sgl = &this->cmd_sgl; - - sg_init_one(sgl, this->cmd_buffer, this->command_length); - dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE); - desc = dmaengine_prep_slave_sg(channel, - sgl, 1, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!desc) - return -EINVAL; - - /* [3] submit the DMA */ - ret = start_dma_without_bch_irq(this, desc); - - dma_unmap_sg(this->dev, sgl, 1, DMA_TO_DEVICE); - - return ret; -} - -int gpmi_send_data(struct gpmi_nand_data *this, const void *buf, int len) -{ - struct dma_async_tx_descriptor *desc; - struct dma_chan *channel = get_dma_chan(this); - int chip = this->current_chip; - int ret; - uint32_t command_mode; - uint32_t address; - u32 pio[2]; - - /* [1] PIO */ - command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE; - address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; - - pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) - | BM_GPMI_CTRL0_WORD_LENGTH - | BF_GPMI_CTRL0_CS(chip, this) - | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) - | BF_GPMI_CTRL0_ADDRESS(address) - | BF_GPMI_CTRL0_XFER_COUNT(len); 
- pio[1] = 0; - desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio, - ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); - if (!desc) - return -EINVAL; - - /* [2] send DMA request */ - prepare_data_dma(this, buf, len, DMA_TO_DEVICE); - desc = dmaengine_prep_slave_sg(channel, &this->data_sgl, - 1, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!desc) - return -EINVAL; - - /* [3] submit the DMA */ - ret = start_dma_without_bch_irq(this, desc); - - dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE); - - return ret; -} - -int gpmi_read_data(struct gpmi_nand_data *this, void *buf, int len) -{ - struct dma_async_tx_descriptor *desc; - struct dma_chan *channel = get_dma_chan(this); - int chip = this->current_chip; - int ret; - u32 pio[2]; - bool direct; - - /* [1] : send PIO */ - pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ) - | BM_GPMI_CTRL0_WORD_LENGTH - | BF_GPMI_CTRL0_CS(chip, this) - | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) - | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA) - | BF_GPMI_CTRL0_XFER_COUNT(len); - pio[1] = 0; - desc = dmaengine_prep_slave_sg(channel, - (struct scatterlist *)pio, - ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); - if (!desc) - return -EINVAL; - - /* [2] : send DMA request */ - direct = prepare_data_dma(this, buf, len, DMA_FROM_DEVICE); - desc = dmaengine_prep_slave_sg(channel, &this->data_sgl, - 1, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!desc) - return -EINVAL; - - /* [3] : submit the DMA */ - - ret = start_dma_without_bch_irq(this, desc); - - dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE); - if (!direct) - memcpy(buf, this->data_buffer_dma, len); - - return ret; -} - -int gpmi_send_page(struct gpmi_nand_data *this, - dma_addr_t payload, dma_addr_t auxiliary) -{ - struct bch_geometry *geo = &this->bch_geometry; - uint32_t command_mode; - uint32_t address; - uint32_t ecc_command; - uint32_t buffer_mask; - struct dma_async_tx_descriptor *desc; - struct dma_chan *channel = get_dma_chan(this); - int chip = this->current_chip; - u32 pio[6]; - - /* A DMA descriptor that does an ECC page read. */ - command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE; - address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; - ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE; - buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE | - BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY; - - pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) - | BM_GPMI_CTRL0_WORD_LENGTH - | BF_GPMI_CTRL0_CS(chip, this) - | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) - | BF_GPMI_CTRL0_ADDRESS(address) - | BF_GPMI_CTRL0_XFER_COUNT(0); - pio[1] = 0; - pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC - | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command) - | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask); - pio[3] = geo->page_size; - pio[4] = payload; - pio[5] = auxiliary; - - desc = dmaengine_prep_slave_sg(channel, - (struct scatterlist *)pio, - ARRAY_SIZE(pio), DMA_TRANS_NONE, - DMA_CTRL_ACK); - if (!desc) - return -EINVAL; - - return start_dma_with_bch_irq(this, desc); -} - -int gpmi_read_page(struct gpmi_nand_data *this, - dma_addr_t payload, dma_addr_t auxiliary) -{ - struct bch_geometry *geo = &this->bch_geometry; - uint32_t command_mode; - uint32_t address; - uint32_t ecc_command; - uint32_t buffer_mask; - struct dma_async_tx_descriptor *desc; - struct dma_chan *channel = get_dma_chan(this); - int chip = this->current_chip; - u32 pio[6]; - - /* [1] Wait for the chip to report ready. 
*/ - command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY; - address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; - - pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) - | BM_GPMI_CTRL0_WORD_LENGTH - | BF_GPMI_CTRL0_CS(chip, this) - | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) - | BF_GPMI_CTRL0_ADDRESS(address) - | BF_GPMI_CTRL0_XFER_COUNT(0); - pio[1] = 0; - desc = dmaengine_prep_slave_sg(channel, - (struct scatterlist *)pio, 2, - DMA_TRANS_NONE, 0); - if (!desc) - return -EINVAL; - - /* [2] Enable the BCH block and read. */ - command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ; - address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; - ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE; - buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE - | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY; - - pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) - | BM_GPMI_CTRL0_WORD_LENGTH - | BF_GPMI_CTRL0_CS(chip, this) - | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) - | BF_GPMI_CTRL0_ADDRESS(address) - | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size); - - pio[1] = 0; - pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC - | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command) - | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask); - pio[3] = geo->page_size; - pio[4] = payload; - pio[5] = auxiliary; - desc = dmaengine_prep_slave_sg(channel, - (struct scatterlist *)pio, - ARRAY_SIZE(pio), DMA_TRANS_NONE, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!desc) - return -EINVAL; - - /* [3] Disable the BCH block */ - command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY; - address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; - - pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) - | BM_GPMI_CTRL0_WORD_LENGTH - | BF_GPMI_CTRL0_CS(chip, this) - | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) - | BF_GPMI_CTRL0_ADDRESS(address) - | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size); - pio[1] = 0; - pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */ - desc = dmaengine_prep_slave_sg(channel, - (struct scatterlist *)pio, 3, - DMA_TRANS_NONE, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!desc) - return -EINVAL; - - /* [4] submit the DMA */ - return start_dma_with_bch_irq(this, desc); -} - -/** - * gpmi_copy_bits - copy bits from one memory region to another - * @dst: destination buffer - * @dst_bit_off: bit offset we're starting to write at - * @src: source buffer - * @src_bit_off: bit offset we're starting to read from - * @nbits: number of bits to copy - * - * This functions copies bits from one memory region to another, and is used by - * the GPMI driver to copy ECC sections which are not guaranteed to be byte - * aligned. - * - * src and dst should not overlap. - * - */ -void gpmi_copy_bits(u8 *dst, size_t dst_bit_off, - const u8 *src, size_t src_bit_off, - size_t nbits) -{ - size_t i; - size_t nbytes; - u32 src_buffer = 0; - size_t bits_in_src_buffer = 0; - - if (!nbits) - return; - - /* - * Move src and dst pointers to the closest byte pointer and store bit - * offsets within a byte. - */ - src += src_bit_off / 8; - src_bit_off %= 8; - - dst += dst_bit_off / 8; - dst_bit_off %= 8; - - /* - * Initialize the src_buffer value with bits available in the first - * byte of data so that we end up with a byte aligned src pointer. - */ - if (src_bit_off) { - src_buffer = src[0] >> src_bit_off; - if (nbits >= (8 - src_bit_off)) { - bits_in_src_buffer += 8 - src_bit_off; - } else { - src_buffer &= GENMASK(nbits - 1, 0); - bits_in_src_buffer += nbits; - } - nbits -= bits_in_src_buffer; - src++; - } - - /* Calculate the number of bytes that can be copied from src to dst. 
*/ - nbytes = nbits / 8; - - /* Try to align dst to a byte boundary. */ - if (dst_bit_off) { - if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) { - src_buffer |= src[0] << bits_in_src_buffer; - bits_in_src_buffer += 8; - src++; - nbytes--; - } - - if (bits_in_src_buffer >= (8 - dst_bit_off)) { - dst[0] &= GENMASK(dst_bit_off - 1, 0); - dst[0] |= src_buffer << dst_bit_off; - src_buffer >>= (8 - dst_bit_off); - bits_in_src_buffer -= (8 - dst_bit_off); - dst_bit_off = 0; - dst++; - if (bits_in_src_buffer > 7) { - bits_in_src_buffer -= 8; - dst[0] = src_buffer; - dst++; - src_buffer >>= 8; - } - } - } - - if (!bits_in_src_buffer && !dst_bit_off) { - /* - * Both src and dst pointers are byte aligned, thus we can - * just use the optimized memcpy function. - */ - if (nbytes) - memcpy(dst, src, nbytes); - } else { - /* - * src buffer is not byte aligned, hence we have to copy each - * src byte to the src_buffer variable before extracting a byte - * to store in dst. - */ - for (i = 0; i < nbytes; i++) { - src_buffer |= src[i] << bits_in_src_buffer; - dst[i] = src_buffer; - src_buffer >>= 8; - } - } - /* Update dst and src pointers */ - dst += nbytes; - src += nbytes; - - /* - * nbits is the number of remaining bits. It should not exceed 8 as - * we've already copied as much bytes as possible. - */ - nbits %= 8; - - /* - * If there's no more bits to copy to the destination and src buffer - * was already byte aligned, then we're done. - */ - if (!nbits && !bits_in_src_buffer) - return; - - /* Copy the remaining bits to src_buffer */ - if (nbits) - src_buffer |= (*src & GENMASK(nbits - 1, 0)) << - bits_in_src_buffer; - bits_in_src_buffer += nbits; - - /* - * In case there were not enough bits to get a byte aligned dst buffer - * prepare the src_buffer variable to match the dst organization (shift - * src_buffer by dst_bit_off and retrieve the least significant bits - * from dst). - */ - if (dst_bit_off) - src_buffer = (src_buffer << dst_bit_off) | - (*dst & GENMASK(dst_bit_off - 1, 0)); - bits_in_src_buffer += dst_bit_off; - - /* - * Keep most significant bits from dst if we end up with an unaligned - * number of bits. - */ - nbytes = bits_in_src_buffer / 8; - if (bits_in_src_buffer % 8) { - src_buffer |= (dst[nbytes] & - GENMASK(7, bits_in_src_buffer % 8)) << - (nbytes * 8); - nbytes++; - } - - /* Copy the remaining bytes to dst */ - for (i = 0; i < nbytes; i++) { - dst[i] = src_buffer; - src_buffer >>= 8; - } -} diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 40df20d1adf5..53e63eeafcf4 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -6,6 +6,7 @@ * Copyright (C) 2008 Embedded Alley Solutions, Inc. */ #include +#include #include #include #include @@ -14,6 +15,7 @@ #include #include #include "gpmi-nand.h" +#include "gpmi-regs.h" #include "bch-regs.h" /* Resource names for the GPMI NAND driver. 
*/ @@ -21,149 +23,223 @@ #define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch" #define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch" -/* add our owner bbt descriptor */ -static uint8_t scan_ff_pattern[] = { 0xff }; -static struct nand_bbt_descr gpmi_bbt_descr = { - .options = 0, - .offs = 0, - .len = 1, - .pattern = scan_ff_pattern -}; +/* Converts time to clock cycles */ +#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period) +#define MXS_SET_ADDR 0x4 +#define MXS_CLR_ADDR 0x8 /* - * We may change the layout if we can get the ECC info from the datasheet, - * else we will use all the (page + OOB). + * Clear the bit and poll it cleared. This is usually called with + * a reset address and mask being either SFTRST(bit 31) or CLKGATE + * (bit 30). */ -static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section, - struct mtd_oob_region *oobregion) +static int clear_poll_bit(void __iomem *addr, u32 mask) { - struct nand_chip *chip = mtd_to_nand(mtd); - struct gpmi_nand_data *this = nand_get_controller_data(chip); - struct bch_geometry *geo = &this->bch_geometry; + int timeout = 0x400; - if (section) - return -ERANGE; + /* clear the bit */ + writel(mask, addr + MXS_CLR_ADDR); - oobregion->offset = 0; - oobregion->length = geo->page_size - mtd->writesize; + /* + * SFTRST needs 3 GPMI clocks to settle, the reference manual + * recommends to wait 1us. + */ + udelay(1); - return 0; + /* poll the bit becoming clear */ + while ((readl(addr) & mask) && --timeout) + /* nothing */; + + return !timeout; } -static int gpmi_ooblayout_free(struct mtd_info *mtd, int section, - struct mtd_oob_region *oobregion) +#define MODULE_CLKGATE (1 << 30) +#define MODULE_SFTRST (1 << 31) +/* + * The current mxs_reset_block() will do two things: + * [1] enable the module. + * [2] reset the module. + * + * In most of the cases, it's ok. + * But in MX23, there is a hardware bug in the BCH block (see erratum #2847). + * If you try to soft reset the BCH block, it becomes unusable until + * the next hard reset. This case occurs in the NAND boot mode. When the board + * boots by NAND, the ROM of the chip will initialize the BCH blocks itself. + * So If the driver tries to reset the BCH again, the BCH will not work anymore. + * You will see a DMA timeout in this case. The bug has been fixed + * in the following chips, such as MX28. + * + * To avoid this bug, just add a new parameter `just_enable` for + * the mxs_reset_block(), and rewrite it here. + */ +static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable) { - struct nand_chip *chip = mtd_to_nand(mtd); - struct gpmi_nand_data *this = nand_get_controller_data(chip); - struct bch_geometry *geo = &this->bch_geometry; + int ret; + int timeout = 0x400; - if (section) - return -ERANGE; + /* clear and poll SFTRST */ + ret = clear_poll_bit(reset_addr, MODULE_SFTRST); + if (unlikely(ret)) + goto error; - /* The available oob size we have. 
*/ - if (geo->page_size < mtd->writesize + mtd->oobsize) { - oobregion->offset = geo->page_size - mtd->writesize; - oobregion->length = mtd->oobsize - oobregion->offset; + /* clear CLKGATE */ + writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR); + + if (!just_enable) { + /* set SFTRST to reset the block */ + writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR); + udelay(1); + + /* poll CLKGATE becoming set */ + while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout) + /* nothing */; + if (unlikely(!timeout)) + goto error; } + /* clear and poll SFTRST */ + ret = clear_poll_bit(reset_addr, MODULE_SFTRST); + if (unlikely(ret)) + goto error; + + /* clear and poll CLKGATE */ + ret = clear_poll_bit(reset_addr, MODULE_CLKGATE); + if (unlikely(ret)) + goto error; + return 0; + +error: + pr_err("%s(%p): module reset timeout\n", __func__, reset_addr); + return -ETIMEDOUT; } -static const char * const gpmi_clks_for_mx2x[] = { - "gpmi_io", -}; - -static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = { - .ecc = gpmi_ooblayout_ecc, - .free = gpmi_ooblayout_free, -}; - -static const struct gpmi_devdata gpmi_devdata_imx23 = { - .type = IS_MX23, - .bch_max_ecc_strength = 20, - .max_chain_delay = 16000, - .clks = gpmi_clks_for_mx2x, - .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x), -}; - -static const struct gpmi_devdata gpmi_devdata_imx28 = { - .type = IS_MX28, - .bch_max_ecc_strength = 20, - .max_chain_delay = 16000, - .clks = gpmi_clks_for_mx2x, - .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x), -}; - -static const char * const gpmi_clks_for_mx6[] = { - "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch", -}; - -static const struct gpmi_devdata gpmi_devdata_imx6q = { - .type = IS_MX6Q, - .bch_max_ecc_strength = 40, - .max_chain_delay = 12000, - .clks = gpmi_clks_for_mx6, - .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6), -}; - -static const struct gpmi_devdata gpmi_devdata_imx6sx = { - .type = IS_MX6SX, - .bch_max_ecc_strength = 62, - .max_chain_delay = 12000, - .clks = gpmi_clks_for_mx6, - .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6), -}; - -static const char * const gpmi_clks_for_mx7d[] = { - "gpmi_io", "gpmi_bch_apb", -}; - -static const struct gpmi_devdata gpmi_devdata_imx7d = { - .type = IS_MX7D, - .bch_max_ecc_strength = 62, - .max_chain_delay = 12000, - .clks = gpmi_clks_for_mx7d, - .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d), -}; - -static irqreturn_t bch_irq(int irq, void *cookie) +static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v) { - struct gpmi_nand_data *this = cookie; + struct clk *clk; + int ret; + int i; - gpmi_clear_bch(this); - complete(&this->bch_done); - return IRQ_HANDLED; + for (i = 0; i < GPMI_CLK_MAX; i++) { + clk = this->resources.clock[i]; + if (!clk) + break; + + if (v) { + ret = clk_prepare_enable(clk); + if (ret) + goto err_clk; + } else { + clk_disable_unprepare(clk); + } + } + return 0; + +err_clk: + for (; i > 0; i--) + clk_disable_unprepare(this->resources.clock[i - 1]); + return ret; } -/* - * Calculate the ECC strength by hand: - * E : The ECC strength. - * G : the length of Galois Field. - * N : The chunk count of per page. - * O : the oobsize of the NAND chip. - * M : the metasize of per page. 
- * - * The formula is : - * E * G * N - * ------------ <= (O - M) - * 8 - * - * So, we get E by: - * (O - M) * 8 - * E <= ------------- - * G * N - */ -static inline int get_ecc_strength(struct gpmi_nand_data *this) +static int gpmi_enable_clk(struct gpmi_nand_data *this) { + return __gpmi_enable_clk(this, true); +} + +static int gpmi_disable_clk(struct gpmi_nand_data *this) +{ + return __gpmi_enable_clk(this, false); +} + +static int gpmi_init(struct gpmi_nand_data *this) +{ + struct resources *r = &this->resources; + int ret; + + ret = gpmi_enable_clk(this); + if (ret) + return ret; + ret = gpmi_reset_block(r->gpmi_regs, false); + if (ret) + goto err_out; + + /* + * Reset BCH here, too. We got failures otherwise :( + * See later BCH reset for explanation of MX23 and MX28 handling + */ + ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this)); + if (ret) + goto err_out; + + /* Choose NAND mode. */ + writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR); + + /* Set the IRQ polarity. */ + writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY, + r->gpmi_regs + HW_GPMI_CTRL1_SET); + + /* Disable Write-Protection. */ + writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET); + + /* Select BCH ECC. */ + writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET); + + /* + * Decouple the chip select from dma channel. We use dma0 for all + * the chips. + */ + writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET); + + gpmi_disable_clk(this); + return 0; +err_out: + gpmi_disable_clk(this); + return ret; +} + +/* This function is very useful. It is called only when the bug occur. */ +static void gpmi_dump_info(struct gpmi_nand_data *this) +{ + struct resources *r = &this->resources; struct bch_geometry *geo = &this->bch_geometry; - struct mtd_info *mtd = nand_to_mtd(&this->nand); - int ecc_strength; + u32 reg; + int i; - ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8) - / (geo->gf_len * geo->ecc_chunk_count); + dev_err(this->dev, "Show GPMI registers :\n"); + for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) { + reg = readl(r->gpmi_regs + i * 0x10); + dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg); + } - /* We need the minor even number. */ - return round_down(ecc_strength, 2); + /* start to print out the BCH info */ + dev_err(this->dev, "Show BCH registers :\n"); + for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) { + reg = readl(r->bch_regs + i * 0x10); + dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg); + } + dev_err(this->dev, "BCH Geometry :\n" + "GF length : %u\n" + "ECC Strength : %u\n" + "Page Size in Bytes : %u\n" + "Metadata Size in Bytes : %u\n" + "ECC Chunk Size in Bytes: %u\n" + "ECC Chunk Count : %u\n" + "Payload Size in Bytes : %u\n" + "Auxiliary Size in Bytes: %u\n" + "Auxiliary Status Offset: %u\n" + "Block Mark Byte Offset : %u\n" + "Block Mark Bit Offset : %u\n", + geo->gf_len, + geo->ecc_strength, + geo->page_size, + geo->metadata_size, + geo->ecc_chunk_size, + geo->ecc_chunk_count, + geo->payload_size, + geo->auxiliary_size, + geo->auxiliary_status_offset, + geo->block_mark_byte_offset, + geo->block_mark_bit_offset); } static inline bool gpmi_check_ecc(struct gpmi_nand_data *this) @@ -296,6 +372,37 @@ static int set_geometry_by_ecc_info(struct gpmi_nand_data *this, return 0; } +/* + * Calculate the ECC strength by hand: + * E : The ECC strength. + * G : the length of Galois Field. + * N : The chunk count of per page. + * O : the oobsize of the NAND chip. + * M : the metasize of per page. 
+ * + * The formula is : + * E * G * N + * ------------ <= (O - M) + * 8 + * + * So, we get E by: + * (O - M) * 8 + * E <= ------------- + * G * N + */ +static inline int get_ecc_strength(struct gpmi_nand_data *this) +{ + struct bch_geometry *geo = &this->bch_geometry; + struct mtd_info *mtd = nand_to_mtd(&this->nand); + int ecc_strength; + + ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8) + / (geo->gf_len * geo->ecc_chunk_count); + + /* We need the minor even number. */ + return round_down(ecc_strength, 2); +} + static int legacy_set_geometry(struct gpmi_nand_data *this) { struct bch_geometry *geo = &this->bch_geometry; @@ -408,7 +515,7 @@ static int legacy_set_geometry(struct gpmi_nand_data *this) return 0; } -int common_nfc_set_geometry(struct gpmi_nand_data *this) +static int common_nfc_set_geometry(struct gpmi_nand_data *this) { struct nand_chip *chip = &this->nand; @@ -430,15 +537,424 @@ int common_nfc_set_geometry(struct gpmi_nand_data *this) return 0; } -struct dma_chan *get_dma_chan(struct gpmi_nand_data *this) +/* Configures the geometry for BCH. */ +static int bch_set_geometry(struct gpmi_nand_data *this) +{ + struct resources *r = &this->resources; + struct bch_geometry *bch_geo = &this->bch_geometry; + unsigned int block_count; + unsigned int block_size; + unsigned int metadata_size; + unsigned int ecc_strength; + unsigned int page_size; + unsigned int gf_len; + int ret; + + ret = common_nfc_set_geometry(this); + if (ret) + return ret; + + block_count = bch_geo->ecc_chunk_count - 1; + block_size = bch_geo->ecc_chunk_size; + metadata_size = bch_geo->metadata_size; + ecc_strength = bch_geo->ecc_strength >> 1; + page_size = bch_geo->page_size; + gf_len = bch_geo->gf_len; + + ret = gpmi_enable_clk(this); + if (ret) + return ret; + + /* + * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this + * chip, otherwise it will lock up. So we skip resetting BCH on the MX23. + * and MX28. + */ + ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this)); + if (ret) + goto err_out; + + /* Configure layout 0. */ + writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count) + | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size) + | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) + | BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) + | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this), + r->bch_regs + HW_BCH_FLASH0LAYOUT0); + + writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) + | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) + | BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) + | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this), + r->bch_regs + HW_BCH_FLASH0LAYOUT1); + + /* Set *all* chip selects to use layout 0. */ + writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT); + + /* Enable interrupts. */ + writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, + r->bch_regs + HW_BCH_CTRL_SET); + + gpmi_disable_clk(this); + return 0; +err_out: + gpmi_disable_clk(this); + return ret; +} + +/* + * <1> Firstly, we should know what's the GPMI-clock means. + * The GPMI-clock is the internal clock in the gpmi nand controller. + * If you set 100MHz to gpmi nand controller, the GPMI-clock's period + * is 10ns. Mark the GPMI-clock's period as GPMI-clock-period. + * + * <2> Secondly, we should know what's the frequency on the nand chip pins. + * The frequency on the nand chip pins is derived from the GPMI-clock. + * We can get it from the following equation: + * + * F = G / (DS + DH) + * + * F : the frequency on the nand chip pins. + * G : the GPMI clock, such as 100MHz. 
+ * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP + * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD + * + * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz, + * the nand EDO(extended Data Out) timing could be applied. + * The GPMI implements a feedback read strobe to sample the read data. + * The feedback read strobe can be delayed to support the nand EDO timing + * where the read strobe may deasserts before the read data is valid, and + * read data is valid for some time after read strobe. + * + * The following figure illustrates some aspects of a NAND Flash read: + * + * |<---tREA---->| + * | | + * | | | + * |<--tRP-->| | + * | | | + * __ ___|__________________________________ + * RDN \________/ | + * | + * /---------\ + * Read Data --------------< >--------- + * \---------/ + * | | + * |<-D->| + * FeedbackRDN ________ ____________ + * \___________/ + * + * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY. + * + * + * <4> Now, we begin to describe how to compute the right RDN_DELAY. + * + * 4.1) From the aspect of the nand chip pins: + * Delay = (tREA + C - tRP) {1} + * + * tREA : the maximum read access time. + * C : a constant to adjust the delay. default is 4000ps. + * tRP : the read pulse width, which is exactly: + * tRP = (GPMI-clock-period) * DATA_SETUP + * + * 4.2) From the aspect of the GPMI nand controller: + * Delay = RDN_DELAY * 0.125 * RP {2} + * + * RP : the DLL reference period. + * if (GPMI-clock-period > DLL_THRETHOLD) + * RP = GPMI-clock-period / 2; + * else + * RP = GPMI-clock-period; + * + * Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period + * is greater DLL_THRETHOLD. In other SOCs, the DLL_THRETHOLD + * is 16000ps, but in mx6q, we use 12000ps. + * + * 4.3) since {1} equals {2}, we get: + * + * (tREA + 4000 - tRP) * 8 + * RDN_DELAY = ----------------------- {3} + * RP + */ +static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this, + const struct nand_sdr_timings *sdr) +{ + struct gpmi_nfc_hardware_timing *hw = &this->hw; + unsigned int dll_threshold_ps = this->devdata->max_chain_delay; + unsigned int period_ps, reference_period_ps; + unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles; + unsigned int tRP_ps; + bool use_half_period; + int sample_delay_ps, sample_delay_factor; + u16 busy_timeout_cycles; + u8 wrn_dly_sel; + + if (sdr->tRC_min >= 30000) { + /* ONFI non-EDO modes [0-3] */ + hw->clk_rate = 22000000; + wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS; + } else if (sdr->tRC_min >= 25000) { + /* ONFI EDO mode 4 */ + hw->clk_rate = 80000000; + wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY; + } else { + /* ONFI EDO mode 5 */ + hw->clk_rate = 100000000; + wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY; + } + + /* SDR core timings are given in picoseconds */ + period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate); + + addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps); + data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps); + data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps); + busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps); + + hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) | + BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) | + BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles); + hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096); + + /* + * Derive NFC ideal delay from {3}: + * + * (tREA + 4000 - tRP) * 8 + * RDN_DELAY = ----------------------- + * RP + */ + if (period_ps > dll_threshold_ps) { + use_half_period = true; + reference_period_ps = period_ps 
/ 2; + } else { + use_half_period = false; + reference_period_ps = period_ps; + } + + tRP_ps = data_setup_cycles * period_ps; + sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8; + if (sample_delay_ps > 0) + sample_delay_factor = sample_delay_ps / reference_period_ps; + else + sample_delay_factor = 0; + + hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel); + if (sample_delay_factor) + hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) | + BM_GPMI_CTRL1_DLL_ENABLE | + (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0); +} + +static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this) +{ + struct gpmi_nfc_hardware_timing *hw = &this->hw; + struct resources *r = &this->resources; + void __iomem *gpmi_regs = r->gpmi_regs; + unsigned int dll_wait_time_us; + + clk_set_rate(r->clock[0], hw->clk_rate); + + writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0); + writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1); + + /* + * Clear several CTRL1 fields, DLL must be disabled when setting + * RDN_DELAY or HALF_PERIOD. + */ + writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR); + writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET); + + /* Wait 64 clock cycles before using the GPMI after enabling the DLL */ + dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64; + if (!dll_wait_time_us) + dll_wait_time_us = 1; + + /* Wait for the DLL to settle. */ + udelay(dll_wait_time_us); +} + +static int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr, + const struct nand_data_interface *conf) +{ + struct gpmi_nand_data *this = nand_get_controller_data(chip); + const struct nand_sdr_timings *sdr; + + /* Retrieve required NAND timings */ + sdr = nand_get_sdr_timings(conf); + if (IS_ERR(sdr)) + return PTR_ERR(sdr); + + /* Only MX6 GPMI controller can reach EDO timings */ + if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this)) + return -ENOTSUPP; + + /* Stop here if this call was just a check */ + if (chipnr < 0) + return 0; + + /* Do the actual derivation of the controller timings */ + gpmi_nfc_compute_timings(this, sdr); + + this->hw.must_apply_timings = true; + + return 0; +} + +/* Clears a BCH interrupt. */ +static void gpmi_clear_bch(struct gpmi_nand_data *this) +{ + struct resources *r = &this->resources; + writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR); +} + +/* Returns the Ready/Busy status of the given chip. */ +static int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip) +{ + struct resources *r = &this->resources; + uint32_t mask = 0; + uint32_t reg = 0; + + if (GPMI_IS_MX23(this)) { + mask = MX23_BM_GPMI_DEBUG_READY0 << chip; + reg = readl(r->gpmi_regs + HW_GPMI_DEBUG); + } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6(this)) { + /* + * In the imx6, all the ready/busy pins are bound + * together. So we only need to check chip 0. + */ + if (GPMI_IS_MX6(this)) + chip = 0; + + /* MX28 shares the same R/B register as MX6Q. */ + mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip); + reg = readl(r->gpmi_regs + HW_GPMI_STAT); + } else + dev_err(this->dev, "unknown arch.\n"); + return reg & mask; +} + +static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this) { /* We use the DMA channel 0 to access all the nand chips. */ return this->dma_chans[0]; } +/* This will be called after the DMA operation is finished. 
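+ *
+ * Roughly, every transfer in this driver follows the same pattern
+ * (the exact descriptor chains live in the gpmi_send_*() and
+ * gpmi_read_*() helpers below):
+ *
+ *	build a PIO descriptor carrying the GPMI_CTRL0 words;
+ *	optionally chain a data descriptor (DMA_MEM_TO_DEV or
+ *	DMA_DEV_TO_MEM);
+ *	dmaengine_submit(desc);
+ *	dma_async_issue_pending(get_dma_chan(this));
+ *	wait up to one second for dma_done and, for ECC page
+ *	transfers, for bch_done as well.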
*/ +static void dma_irq_callback(void *param) +{ + struct gpmi_nand_data *this = param; + struct completion *dma_c = &this->dma_done; + + complete(dma_c); +} + +static int start_dma_without_bch_irq(struct gpmi_nand_data *this, + struct dma_async_tx_descriptor *desc) +{ + struct completion *dma_c = &this->dma_done; + unsigned long timeout; + + init_completion(dma_c); + + desc->callback = dma_irq_callback; + desc->callback_param = this; + dmaengine_submit(desc); + dma_async_issue_pending(get_dma_chan(this)); + + /* Wait for the interrupt from the DMA block. */ + timeout = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000)); + if (!timeout) { + dev_err(this->dev, "DMA timeout, last DMA\n"); + gpmi_dump_info(this); + return -ETIMEDOUT; + } + return 0; +} + +static irqreturn_t bch_irq(int irq, void *cookie) +{ + struct gpmi_nand_data *this = cookie; + + gpmi_clear_bch(this); + complete(&this->bch_done); + return IRQ_HANDLED; +} + +/* + * This function is used in BCH reading or BCH writing pages. + * It will wait for the BCH interrupt as long as ONE second. + * Actually, we must wait for two interrupts : + * [1] firstly the DMA interrupt and + * [2] secondly the BCH interrupt. + */ +static int start_dma_with_bch_irq(struct gpmi_nand_data *this, + struct dma_async_tx_descriptor *desc) +{ + struct completion *bch_c = &this->bch_done; + unsigned long timeout; + + /* Prepare to receive an interrupt from the BCH block. */ + init_completion(bch_c); + + /* start the DMA */ + start_dma_without_bch_irq(this, desc); + + /* Wait for the interrupt from the BCH block. */ + timeout = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000)); + if (!timeout) { + dev_err(this->dev, "BCH timeout\n"); + gpmi_dump_info(this); + return -ETIMEDOUT; + } + return 0; +} + +static int gpmi_send_command(struct gpmi_nand_data *this) +{ + struct dma_chan *channel = get_dma_chan(this); + struct dma_async_tx_descriptor *desc; + struct scatterlist *sgl; + int chip = this->current_chip; + int ret; + u32 pio[3]; + + /* [1] send out the PIO words */ + pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE) + | BM_GPMI_CTRL0_WORD_LENGTH + | BF_GPMI_CTRL0_CS(chip, this) + | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) + | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE) + | BM_GPMI_CTRL0_ADDRESS_INCREMENT + | BF_GPMI_CTRL0_XFER_COUNT(this->command_length); + pio[1] = pio[2] = 0; + desc = dmaengine_prep_slave_sg(channel, + (struct scatterlist *)pio, + ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); + if (!desc) + return -EINVAL; + + /* [2] send out the COMMAND + ADDRESS string stored in @buffer */ + sgl = &this->cmd_sgl; + + sg_init_one(sgl, this->cmd_buffer, this->command_length); + dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE); + desc = dmaengine_prep_slave_sg(channel, + sgl, 1, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) + return -EINVAL; + + /* [3] submit the DMA */ + ret = start_dma_without_bch_irq(this, desc); + + dma_unmap_sg(this->dev, sgl, 1, DMA_TO_DEVICE); + + return ret; +} + /* Can we use the upper's buffer directly for DMA? */ -bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf, int len, - enum dma_data_direction dr) +static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf, + int len, enum dma_data_direction dr) { struct scatterlist *sgl = &this->data_sgl; int ret; @@ -465,67 +981,472 @@ map_fail: return false; } -/* This will be called after the DMA operation is finished. 
*/ -static void dma_irq_callback(void *param) +static int gpmi_send_data(struct gpmi_nand_data *this, const void *buf, int len) { - struct gpmi_nand_data *this = param; - struct completion *dma_c = &this->dma_done; + struct dma_async_tx_descriptor *desc; + struct dma_chan *channel = get_dma_chan(this); + int chip = this->current_chip; + int ret; + uint32_t command_mode; + uint32_t address; + u32 pio[2]; - complete(dma_c); + /* [1] PIO */ + command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE; + address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; + + pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) + | BM_GPMI_CTRL0_WORD_LENGTH + | BF_GPMI_CTRL0_CS(chip, this) + | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) + | BF_GPMI_CTRL0_ADDRESS(address) + | BF_GPMI_CTRL0_XFER_COUNT(len); + pio[1] = 0; + desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio, + ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); + if (!desc) + return -EINVAL; + + /* [2] send DMA request */ + prepare_data_dma(this, buf, len, DMA_TO_DEVICE); + desc = dmaengine_prep_slave_sg(channel, &this->data_sgl, + 1, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) + return -EINVAL; + + /* [3] submit the DMA */ + ret = start_dma_without_bch_irq(this, desc); + + dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE); + + return ret; } -int start_dma_without_bch_irq(struct gpmi_nand_data *this, - struct dma_async_tx_descriptor *desc) +static int gpmi_read_data(struct gpmi_nand_data *this, void *buf, int len) { - struct completion *dma_c = &this->dma_done; - unsigned long timeout; + struct dma_async_tx_descriptor *desc; + struct dma_chan *channel = get_dma_chan(this); + int chip = this->current_chip; + int ret; + u32 pio[2]; + bool direct; - init_completion(dma_c); + /* [1] : send PIO */ + pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ) + | BM_GPMI_CTRL0_WORD_LENGTH + | BF_GPMI_CTRL0_CS(chip, this) + | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) + | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA) + | BF_GPMI_CTRL0_XFER_COUNT(len); + pio[1] = 0; + desc = dmaengine_prep_slave_sg(channel, + (struct scatterlist *)pio, + ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); + if (!desc) + return -EINVAL; - desc->callback = dma_irq_callback; - desc->callback_param = this; - dmaengine_submit(desc); - dma_async_issue_pending(get_dma_chan(this)); + /* [2] : send DMA request */ + direct = prepare_data_dma(this, buf, len, DMA_FROM_DEVICE); + desc = dmaengine_prep_slave_sg(channel, &this->data_sgl, + 1, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) + return -EINVAL; - /* Wait for the interrupt from the DMA block. */ - timeout = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000)); - if (!timeout) { - dev_err(this->dev, "DMA timeout, last DMA\n"); - gpmi_dump_info(this); - return -ETIMEDOUT; + /* [3] : submit the DMA */ + + ret = start_dma_without_bch_irq(this, desc); + + dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE); + if (!direct) + memcpy(buf, this->data_buffer_dma, len); + + return ret; +} + +static int gpmi_send_page(struct gpmi_nand_data *this, dma_addr_t payload, + dma_addr_t auxiliary) +{ + struct bch_geometry *geo = &this->bch_geometry; + uint32_t command_mode; + uint32_t address; + uint32_t ecc_command; + uint32_t buffer_mask; + struct dma_async_tx_descriptor *desc; + struct dma_chan *channel = get_dma_chan(this); + int chip = this->current_chip; + u32 pio[6]; + + /* A DMA descriptor that does an ECC page read. 
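+	 *
+	 * The six PIO words below are written, in order, to the GPMI
+	 * PIO registers (CTRL0, COMPARE, ECCCTRL, ECCCOUNT, PAYLOAD,
+	 * AUXILIARY in the reference manual's naming); roughly:
+	 *
+	 *	pio[0]	command mode, word length, CS, address space,
+	 *		transfer count
+	 *	pio[1]	unused here
+	 *	pio[2]	enable the BCH, encode/decode command, buffer mask
+	 *	pio[3]	number of bytes covered by the BCH (the page size)
+	 *	pio[4]	DMA address of the payload buffer
+	 *	pio[5]	DMA address of the auxiliary (metadata/status) buffer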
*/ + command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE; + address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; + ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE; + buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE | + BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY; + + pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) + | BM_GPMI_CTRL0_WORD_LENGTH + | BF_GPMI_CTRL0_CS(chip, this) + | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) + | BF_GPMI_CTRL0_ADDRESS(address) + | BF_GPMI_CTRL0_XFER_COUNT(0); + pio[1] = 0; + pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC + | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command) + | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask); + pio[3] = geo->page_size; + pio[4] = payload; + pio[5] = auxiliary; + + desc = dmaengine_prep_slave_sg(channel, + (struct scatterlist *)pio, + ARRAY_SIZE(pio), DMA_TRANS_NONE, + DMA_CTRL_ACK); + if (!desc) + return -EINVAL; + + return start_dma_with_bch_irq(this, desc); +} + +static int gpmi_read_page(struct gpmi_nand_data *this, dma_addr_t payload, + dma_addr_t auxiliary) +{ + struct bch_geometry *geo = &this->bch_geometry; + uint32_t command_mode; + uint32_t address; + uint32_t ecc_command; + uint32_t buffer_mask; + struct dma_async_tx_descriptor *desc; + struct dma_chan *channel = get_dma_chan(this); + int chip = this->current_chip; + u32 pio[6]; + + /* [1] Wait for the chip to report ready. */ + command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY; + address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; + + pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) + | BM_GPMI_CTRL0_WORD_LENGTH + | BF_GPMI_CTRL0_CS(chip, this) + | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) + | BF_GPMI_CTRL0_ADDRESS(address) + | BF_GPMI_CTRL0_XFER_COUNT(0); + pio[1] = 0; + desc = dmaengine_prep_slave_sg(channel, + (struct scatterlist *)pio, 2, + DMA_TRANS_NONE, 0); + if (!desc) + return -EINVAL; + + /* [2] Enable the BCH block and read. */ + command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ; + address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; + ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE; + buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE + | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY; + + pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) + | BM_GPMI_CTRL0_WORD_LENGTH + | BF_GPMI_CTRL0_CS(chip, this) + | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) + | BF_GPMI_CTRL0_ADDRESS(address) + | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size); + + pio[1] = 0; + pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC + | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command) + | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask); + pio[3] = geo->page_size; + pio[4] = payload; + pio[5] = auxiliary; + desc = dmaengine_prep_slave_sg(channel, + (struct scatterlist *)pio, + ARRAY_SIZE(pio), DMA_TRANS_NONE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) + return -EINVAL; + + /* [3] Disable the BCH block */ + command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY; + address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; + + pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) + | BM_GPMI_CTRL0_WORD_LENGTH + | BF_GPMI_CTRL0_CS(chip, this) + | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) + | BF_GPMI_CTRL0_ADDRESS(address) + | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size); + pio[1] = 0; + pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. 
*/ + desc = dmaengine_prep_slave_sg(channel, + (struct scatterlist *)pio, 3, + DMA_TRANS_NONE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) + return -EINVAL; + + /* [4] submit the DMA */ + return start_dma_with_bch_irq(this, desc); +} + +/** + * gpmi_copy_bits - copy bits from one memory region to another + * @dst: destination buffer + * @dst_bit_off: bit offset we're starting to write at + * @src: source buffer + * @src_bit_off: bit offset we're starting to read from + * @nbits: number of bits to copy + * + * This functions copies bits from one memory region to another, and is used by + * the GPMI driver to copy ECC sections which are not guaranteed to be byte + * aligned. + * + * src and dst should not overlap. + * + */ +static void gpmi_copy_bits(u8 *dst, size_t dst_bit_off, const u8 *src, + size_t src_bit_off, size_t nbits) +{ + size_t i; + size_t nbytes; + u32 src_buffer = 0; + size_t bits_in_src_buffer = 0; + + if (!nbits) + return; + + /* + * Move src and dst pointers to the closest byte pointer and store bit + * offsets within a byte. + */ + src += src_bit_off / 8; + src_bit_off %= 8; + + dst += dst_bit_off / 8; + dst_bit_off %= 8; + + /* + * Initialize the src_buffer value with bits available in the first + * byte of data so that we end up with a byte aligned src pointer. + */ + if (src_bit_off) { + src_buffer = src[0] >> src_bit_off; + if (nbits >= (8 - src_bit_off)) { + bits_in_src_buffer += 8 - src_bit_off; + } else { + src_buffer &= GENMASK(nbits - 1, 0); + bits_in_src_buffer += nbits; + } + nbits -= bits_in_src_buffer; + src++; + } + + /* Calculate the number of bytes that can be copied from src to dst. */ + nbytes = nbits / 8; + + /* Try to align dst to a byte boundary. */ + if (dst_bit_off) { + if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) { + src_buffer |= src[0] << bits_in_src_buffer; + bits_in_src_buffer += 8; + src++; + nbytes--; + } + + if (bits_in_src_buffer >= (8 - dst_bit_off)) { + dst[0] &= GENMASK(dst_bit_off - 1, 0); + dst[0] |= src_buffer << dst_bit_off; + src_buffer >>= (8 - dst_bit_off); + bits_in_src_buffer -= (8 - dst_bit_off); + dst_bit_off = 0; + dst++; + if (bits_in_src_buffer > 7) { + bits_in_src_buffer -= 8; + dst[0] = src_buffer; + dst++; + src_buffer >>= 8; + } + } + } + + if (!bits_in_src_buffer && !dst_bit_off) { + /* + * Both src and dst pointers are byte aligned, thus we can + * just use the optimized memcpy function. + */ + if (nbytes) + memcpy(dst, src, nbytes); + } else { + /* + * src buffer is not byte aligned, hence we have to copy each + * src byte to the src_buffer variable before extracting a byte + * to store in dst. + */ + for (i = 0; i < nbytes; i++) { + src_buffer |= src[i] << bits_in_src_buffer; + dst[i] = src_buffer; + src_buffer >>= 8; + } + } + /* Update dst and src pointers */ + dst += nbytes; + src += nbytes; + + /* + * nbits is the number of remaining bits. It should not exceed 8 as + * we've already copied as much bytes as possible. + */ + nbits %= 8; + + /* + * If there's no more bits to copy to the destination and src buffer + * was already byte aligned, then we're done. 
+ */ + if (!nbits && !bits_in_src_buffer) + return; + + /* Copy the remaining bits to src_buffer */ + if (nbits) + src_buffer |= (*src & GENMASK(nbits - 1, 0)) << + bits_in_src_buffer; + bits_in_src_buffer += nbits; + + /* + * In case there were not enough bits to get a byte aligned dst buffer + * prepare the src_buffer variable to match the dst organization (shift + * src_buffer by dst_bit_off and retrieve the least significant bits + * from dst). + */ + if (dst_bit_off) + src_buffer = (src_buffer << dst_bit_off) | + (*dst & GENMASK(dst_bit_off - 1, 0)); + bits_in_src_buffer += dst_bit_off; + + /* + * Keep most significant bits from dst if we end up with an unaligned + * number of bits. + */ + nbytes = bits_in_src_buffer / 8; + if (bits_in_src_buffer % 8) { + src_buffer |= (dst[nbytes] & + GENMASK(7, bits_in_src_buffer % 8)) << + (nbytes * 8); + nbytes++; + } + + /* Copy the remaining bytes to dst */ + for (i = 0; i < nbytes; i++) { + dst[i] = src_buffer; + src_buffer >>= 8; } - return 0; } +/* add our owner bbt descriptor */ +static uint8_t scan_ff_pattern[] = { 0xff }; +static struct nand_bbt_descr gpmi_bbt_descr = { + .options = 0, + .offs = 0, + .len = 1, + .pattern = scan_ff_pattern +}; + /* - * This function is used in BCH reading or BCH writing pages. - * It will wait for the BCH interrupt as long as ONE second. - * Actually, we must wait for two interrupts : - * [1] firstly the DMA interrupt and - * [2] secondly the BCH interrupt. + * We may change the layout if we can get the ECC info from the datasheet, + * else we will use all the (page + OOB). */ -int start_dma_with_bch_irq(struct gpmi_nand_data *this, - struct dma_async_tx_descriptor *desc) +static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) { - struct completion *bch_c = &this->bch_done; - unsigned long timeout; + struct nand_chip *chip = mtd_to_nand(mtd); + struct gpmi_nand_data *this = nand_get_controller_data(chip); + struct bch_geometry *geo = &this->bch_geometry; - /* Prepare to receive an interrupt from the BCH block. */ - init_completion(bch_c); + if (section) + return -ERANGE; - /* start the DMA */ - start_dma_without_bch_irq(this, desc); + oobregion->offset = 0; + oobregion->length = geo->page_size - mtd->writesize; - /* Wait for the interrupt from the BCH block. */ - timeout = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000)); - if (!timeout) { - dev_err(this->dev, "BCH timeout\n"); - gpmi_dump_info(this); - return -ETIMEDOUT; - } return 0; } +static int gpmi_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct gpmi_nand_data *this = nand_get_controller_data(chip); + struct bch_geometry *geo = &this->bch_geometry; + + if (section) + return -ERANGE; + + /* The available oob size we have. 
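+	 *
+	 * For instance (numbers purely for illustration): with a
+	 * 4096+224 byte NAND and an ECC layout covering 4196 bytes,
+	 * the free region starts at OOB offset 100 and spans the
+	 * remaining 124 bytes.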
*/ + if (geo->page_size < mtd->writesize + mtd->oobsize) { + oobregion->offset = geo->page_size - mtd->writesize; + oobregion->length = mtd->oobsize - oobregion->offset; + } + + return 0; +} + +static const char * const gpmi_clks_for_mx2x[] = { + "gpmi_io", +}; + +static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = { + .ecc = gpmi_ooblayout_ecc, + .free = gpmi_ooblayout_free, +}; + +static const struct gpmi_devdata gpmi_devdata_imx23 = { + .type = IS_MX23, + .bch_max_ecc_strength = 20, + .max_chain_delay = 16000, + .clks = gpmi_clks_for_mx2x, + .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x), +}; + +static const struct gpmi_devdata gpmi_devdata_imx28 = { + .type = IS_MX28, + .bch_max_ecc_strength = 20, + .max_chain_delay = 16000, + .clks = gpmi_clks_for_mx2x, + .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x), +}; + +static const char * const gpmi_clks_for_mx6[] = { + "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch", +}; + +static const struct gpmi_devdata gpmi_devdata_imx6q = { + .type = IS_MX6Q, + .bch_max_ecc_strength = 40, + .max_chain_delay = 12000, + .clks = gpmi_clks_for_mx6, + .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6), +}; + +static const struct gpmi_devdata gpmi_devdata_imx6sx = { + .type = IS_MX6SX, + .bch_max_ecc_strength = 62, + .max_chain_delay = 12000, + .clks = gpmi_clks_for_mx6, + .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6), +}; + +static const char * const gpmi_clks_for_mx7d[] = { + "gpmi_io", "gpmi_bch_apb", +}; + +static const struct gpmi_devdata gpmi_devdata_imx7d = { + .type = IS_MX7D, + .bch_max_ecc_strength = 62, + .max_chain_delay = 12000, + .clks = gpmi_clks_for_mx7d, + .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d), +}; + static int acquire_register_block(struct gpmi_nand_data *this, const char *res_name) { diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h index a804a4a5bd46..e001c84b75fa 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h @@ -159,40 +159,6 @@ struct gpmi_nand_data { void *private; }; -/* Common Services */ -int common_nfc_set_geometry(struct gpmi_nand_data *); -struct dma_chan *get_dma_chan(struct gpmi_nand_data *); -bool prepare_data_dma(struct gpmi_nand_data *, const void *buf, int len, - enum dma_data_direction dr); -int start_dma_without_bch_irq(struct gpmi_nand_data *, - struct dma_async_tx_descriptor *); -int start_dma_with_bch_irq(struct gpmi_nand_data *, - struct dma_async_tx_descriptor *); - -/* GPMI-NAND helper function library */ -int gpmi_init(struct gpmi_nand_data *); -void gpmi_clear_bch(struct gpmi_nand_data *); -void gpmi_dump_info(struct gpmi_nand_data *); -int bch_set_geometry(struct gpmi_nand_data *); -int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip); -int gpmi_send_command(struct gpmi_nand_data *); -int gpmi_enable_clk(struct gpmi_nand_data *this); -int gpmi_disable_clk(struct gpmi_nand_data *this); -int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf); -void gpmi_nfc_apply_timings(struct gpmi_nand_data *this); -int gpmi_read_data(struct gpmi_nand_data *, void *buf, int len); -int gpmi_send_data(struct gpmi_nand_data *, const void *buf, int len); - -int gpmi_send_page(struct gpmi_nand_data *, - dma_addr_t payload, dma_addr_t auxiliary); -int gpmi_read_page(struct gpmi_nand_data *, - dma_addr_t payload, dma_addr_t auxiliary); - -void gpmi_copy_bits(u8 *dst, size_t dst_bit_off, - const u8 *src, size_t src_bit_off, - size_t nbits); - /* BCH : Status 
Block Completion Codes */ #define STATUS_GOOD 0x00 #define STATUS_ERASED 0xff From 727ab978e526bfbca8f8e3892178aed55b688e57 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 21 May 2019 09:06:33 +0200 Subject: [PATCH 27/60] mtd: rawnand: gpmi: remove unused variable The "private" member of struct gpmi_nand_data isn't used anywhere. Remove it. Signed-off-by: Sascha Hauer Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h index e001c84b75fa..ba074a35ff01 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h @@ -154,9 +154,6 @@ struct gpmi_nand_data { #define DMA_CHANS 8 struct dma_chan *dma_chans[DMA_CHANS]; struct completion dma_done; - - /* private */ - void *private; }; /* BCH : Status Block Completion Codes */ From 41e2322b258d6d907a4b721ee39a191f3b1e8a6c Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 21 May 2019 09:06:34 +0200 Subject: [PATCH 28/60] mtd: rawnand: gpmi: Remove unnecessary variables this->page_buffer_virt and this->payload_virt are always set to the same value, so drop the former and just use the latter. Same for this->page_buffer_virt and this->payload_virt. Signed-off-by: Sascha Hauer Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 15 ++++++--------- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h | 2 -- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 53e63eeafcf4..8a3fe04c58b6 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -1637,10 +1637,10 @@ static void gpmi_free_dma_buffer(struct gpmi_nand_data *this) { struct device *dev = this->dev; - if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt)) + if (this->payload_virt && virt_addr_valid(this->payload_virt)) dma_free_coherent(dev, this->page_buffer_size, - this->page_buffer_virt, - this->page_buffer_phys); + this->payload_virt, + this->payload_phys); kfree(this->cmd_buffer); kfree(this->data_buffer_dma); kfree(this->raw_buffer); @@ -1648,7 +1648,6 @@ static void gpmi_free_dma_buffer(struct gpmi_nand_data *this) this->cmd_buffer = NULL; this->data_buffer_dma = NULL; this->raw_buffer = NULL; - this->page_buffer_virt = NULL; this->page_buffer_size = 0; } @@ -1686,9 +1685,9 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this) * auxiliary buffer will appear on a 32-bit boundary. */ this->page_buffer_size = geo->payload_size + geo->auxiliary_size; - this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size, - &this->page_buffer_phys, GFP_DMA); - if (!this->page_buffer_virt) + this->payload_virt = dma_alloc_coherent(dev, this->page_buffer_size, + &this->payload_phys, GFP_DMA); + if (!this->payload_virt) goto error_alloc; this->raw_buffer = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL); @@ -1696,8 +1695,6 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this) goto error_alloc; /* Slice up the page buffer. 
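 	 *
 	 * The coherent allocation above holds payload_size +
 	 * auxiliary_size bytes, so the auxiliary buffer simply starts
 	 * right after the payload.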
*/ - this->payload_virt = this->page_buffer_virt; - this->payload_phys = this->page_buffer_phys; this->auxiliary_virt = this->payload_virt + geo->payload_size; this->auxiliary_phys = this->payload_phys + geo->payload_size; return 0; diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h index ba074a35ff01..51a070da84ed 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h @@ -138,8 +138,6 @@ struct gpmi_nand_data { struct scatterlist data_sgl; char *data_buffer_dma; - void *page_buffer_virt; - dma_addr_t page_buffer_phys; unsigned int page_buffer_size; void *payload_virt; From b05d73d2ceafe0d2f65f8223c2b4259e6aefcbe9 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 21 May 2019 09:06:35 +0200 Subject: [PATCH 29/60] mtd: rawnand: gpmi: read buf in nand_read_page_op The driver calls nand_read_page_op without a buffer passed and then calls chip->legacy.read_buf to read the buffer afterwards which is the same as passing the buffer nand_read_page_op in the first place. Signed-off-by: Sascha Hauer Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 26 +++++++++++++++------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 8a3fe04c58b6..4cc37f1b0f8e 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -2244,14 +2244,17 @@ static int gpmi_ecc_read_oob(struct nand_chip *chip, int page) { struct mtd_info *mtd = nand_to_mtd(chip); struct gpmi_nand_data *this = nand_get_controller_data(chip); + int ret; dev_dbg(this->dev, "page number is %d\n", page); /* clear the OOB buffer */ memset(chip->oob_poi, ~0, mtd->oobsize); /* Read out the conventional OOB. */ - nand_read_page_op(chip, page, mtd->writesize, NULL, 0); - chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize); + ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi, + mtd->oobsize); + if (ret) + return ret; /* * Now, we want to make sure the block mark is correct. In the @@ -2260,8 +2263,9 @@ static int gpmi_ecc_read_oob(struct nand_chip *chip, int page) */ if (GPMI_IS_MX23(this)) { /* Read the block mark into the first byte of the OOB buffer. */ - nand_read_page_op(chip, page, 0, NULL, 0); - chip->oob_poi[0] = chip->legacy.read_byte(chip); + ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1); + if (ret) + return ret; } return 0; @@ -2526,6 +2530,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this) u8 *buffer = nand_get_data_buf(chip); int saved_chip_number; int found_an_ncb_fingerprint = false; + int ret; /* Compute the number of strides in a search area. */ search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent; @@ -2548,8 +2553,10 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this) * Read the NCB fingerprint. The fingerprint is four bytes long * and starts in the 12th byte of the page. */ - nand_read_page_op(chip, page, 12, NULL, 0); - chip->legacy.read_buf(chip, buffer, strlen(fingerprint)); + ret = nand_read_page_op(chip, page, 12, buffer, + strlen(fingerprint)); + if (ret) + continue; /* Look for the fingerprint. */ if (!memcmp(buffer, fingerprint, strlen(fingerprint))) { @@ -2691,10 +2698,13 @@ static int mx23_boot_init(struct gpmi_nand_data *this) /* Send the command to read the conventional block mark. 
*/ nand_select_target(chip, chipnr); - nand_read_page_op(chip, page, mtd->writesize, NULL, 0); - block_mark = chip->legacy.read_byte(chip); + ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark, + 1); nand_deselect_target(chip); + if (ret) + continue; + /* * Check if the block is marked bad. If so, we need to mark it * again, but this time the result will be a mark in the From ad8b4f1454ad2c7509bc8bf8930bead59619d117 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 21 May 2019 09:06:36 +0200 Subject: [PATCH 30/60] mtd: rawnand: gpmi: remove unused parameters gpmi_ecc_read_page_data uses the page parameter only for a debug printf, so we can drop the parameter and the debug printf. Moving the oob delivery from gpmi_ecc_read_page_data to gpmi_ecc_read_page makes the oob_required parameter unnecessary aswell. Signed-off-by: Sascha Hauer Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 34 ++++++++++++---------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 4cc37f1b0f8e..62f4b0780a7a 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -1852,9 +1852,7 @@ static void block_mark_swapping(struct gpmi_nand_data *this, p[1] = (p[1] & mask) | (from_oob >> (8 - bit)); } -static int gpmi_ecc_read_page_data(struct nand_chip *chip, - uint8_t *buf, int oob_required, - int page) +static int gpmi_ecc_read_page_data(struct nand_chip *chip, uint8_t *buf) { struct gpmi_nand_data *this = nand_get_controller_data(chip); struct bch_geometry *nfc_geo = &this->bch_geometry; @@ -1866,8 +1864,6 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip, int ret; bool direct = false; - dev_dbg(this->dev, "page number is : %d\n", page); - payload_phys = this->payload_phys; if (virt_addr_valid(buf)) { @@ -1982,6 +1978,22 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip, /* handle the block mark swapping */ block_mark_swapping(this, buf, this->auxiliary_virt); + return max_bitflips; +} + +static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf, + int oob_required, int page) +{ + struct gpmi_nand_data *this = nand_get_controller_data(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + int ret; + + nand_read_page_op(chip, page, 0, NULL, 0); + + ret = gpmi_ecc_read_page_data(chip, buf); + if (ret < 0) + return ret; + if (oob_required) { /* * It's time to deliver the OOB bytes. 
See gpmi_ecc_read_oob() @@ -1997,15 +2009,7 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip, chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0]; } - return max_bitflips; -} - -static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf, - int oob_required, int page) -{ - nand_read_page_op(chip, page, 0, NULL, 0); - - return gpmi_ecc_read_page_data(chip, buf, oob_required, page); + return ret; } /* Fake a virtual small page for the subpage read */ @@ -2086,7 +2090,7 @@ static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs, /* Read the subpage now */ this->swap_block_mark = false; - max_bitflips = gpmi_ecc_read_page_data(chip, buf, 0, page); + max_bitflips = gpmi_ecc_read_page_data(chip, buf); /* Restore */ writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0); From 1ee514d00d64678be30b6ce2d5daa49965b9ee2b Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 21 May 2019 09:06:37 +0200 Subject: [PATCH 31/60] mtd: rawnand: gpmi: Drop unnecessary restoring of previous chipselection The i.MX23 specific option read code is called right after nand_scan. We can rely on the NAND core having disabled the chipselect, so there's no point in restoring the original chip select after NAND operations. Drop it. Signed-off-by: Sascha Hauer Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 62f4b0780a7a..1007ac3a2909 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -2532,14 +2532,12 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this) unsigned int stride; unsigned int page; u8 *buffer = nand_get_data_buf(chip); - int saved_chip_number; int found_an_ncb_fingerprint = false; int ret; /* Compute the number of strides in a search area. */ search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent; - saved_chip_number = this->current_chip; nand_select_target(chip, 0); /* @@ -2570,10 +2568,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this) } - if (saved_chip_number >= 0) - nand_select_target(chip, saved_chip_number); - else - nand_deselect_target(chip); + nand_deselect_target(chip); if (found_an_ncb_fingerprint) dev_dbg(dev, "\tFound a fingerprint\n"); @@ -2597,7 +2592,6 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this) unsigned int stride; unsigned int page; u8 *buffer = nand_get_data_buf(chip); - int saved_chip_number; int status; /* Compute the search area geometry. */ @@ -2614,8 +2608,6 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this) dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides); dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages); - /* Select chip 0. */ - saved_chip_number = this->current_chip; nand_select_target(chip, 0); /* Loop over blocks in the first search area, erasing them. */ @@ -2647,11 +2639,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this) dev_err(dev, "[%s] Write failed.\n", __func__); } - /* Deselect chip 0. 
*/ - if (saved_chip_number >= 0) - nand_select_target(chip, saved_chip_number); - else - nand_deselect_target(chip); + nand_deselect_target(chip); return 0; } From 041414682b0daa04c9ab40c87ffd86bc55abf592 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 21 May 2019 09:06:38 +0200 Subject: [PATCH 32/60] mtd: rawnand: gpmi: use runtime PM to manage clocks The gpmi driver aggressively en/disables the clocks between operations which has significant performance cost. Use runtime PM to get rid of this bottleneck. Signed-off-by: Sascha Hauer Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 69 ++++++++++++++-------- 1 file changed, 44 insertions(+), 25 deletions(-) diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 1007ac3a2909..0c943ed42c85 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "gpmi-nand.h" #include "gpmi-regs.h" #include "bch-regs.h" @@ -141,24 +142,11 @@ err_clk: return ret; } -static int gpmi_enable_clk(struct gpmi_nand_data *this) -{ - return __gpmi_enable_clk(this, true); -} - -static int gpmi_disable_clk(struct gpmi_nand_data *this) -{ - return __gpmi_enable_clk(this, false); -} - static int gpmi_init(struct gpmi_nand_data *this) { struct resources *r = &this->resources; int ret; - ret = gpmi_enable_clk(this); - if (ret) - return ret; ret = gpmi_reset_block(r->gpmi_regs, false); if (ret) goto err_out; @@ -190,10 +178,8 @@ static int gpmi_init(struct gpmi_nand_data *this) */ writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET); - gpmi_disable_clk(this); return 0; err_out: - gpmi_disable_clk(this); return ret; } @@ -561,8 +547,8 @@ static int bch_set_geometry(struct gpmi_nand_data *this) page_size = bch_geo->page_size; gf_len = bch_geo->gf_len; - ret = gpmi_enable_clk(this); - if (ret) + ret = pm_runtime_get_sync(this->dev); + if (ret < 0) return ret; /* @@ -595,10 +581,11 @@ static int bch_set_geometry(struct gpmi_nand_data *this) writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, r->bch_regs + HW_BCH_CTRL_SET); - gpmi_disable_clk(this); - return 0; + ret = 0; err_out: - gpmi_disable_clk(this); + pm_runtime_mark_last_busy(this->dev); + pm_runtime_put_autosuspend(this->dev); + return ret; } @@ -1754,13 +1741,12 @@ static void gpmi_select_chip(struct nand_chip *chip, int chipnr) * die is selected/unselected. 
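 	 *
 	 * Roughly:
 	 *
 	 *	select:		pm_runtime_get_sync(this->dev);
 	 *	deselect:	pm_runtime_mark_last_busy(this->dev);
 	 *			pm_runtime_put_autosuspend(this->dev);
 	 *
 	 * so the clocks stay enabled during the 500 ms runtime PM
 	 * autosuspend window between back-to-back operations.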
*/ if (this->current_chip < 0 && chipnr >= 0) { - ret = gpmi_enable_clk(this); - if (ret) + ret = pm_runtime_get_sync(this->dev); + if (ret < 0) dev_err(this->dev, "Failed to enable the clock\n"); } else if (this->current_chip >= 0 && chipnr < 0) { - ret = gpmi_disable_clk(this); - if (ret) - dev_err(this->dev, "Failed to disable the clock\n"); + pm_runtime_mark_last_busy(this->dev); + pm_runtime_put_autosuspend(this->dev); } /* @@ -2924,6 +2910,16 @@ static int gpmi_nand_probe(struct platform_device *pdev) if (ret) goto exit_acquire_resources; + ret = __gpmi_enable_clk(this, true); + if (ret) + goto exit_nfc_init; + + pm_runtime_set_autosuspend_delay(&pdev->dev, 500); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + ret = gpmi_init(this); if (ret) goto exit_nfc_init; @@ -2932,11 +2928,16 @@ static int gpmi_nand_probe(struct platform_device *pdev) if (ret) goto exit_nfc_init; + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + dev_info(this->dev, "driver registered.\n"); return 0; exit_nfc_init: + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); release_resources(this); exit_acquire_resources: @@ -2947,6 +2948,9 @@ static int gpmi_nand_remove(struct platform_device *pdev) { struct gpmi_nand_data *this = platform_get_drvdata(pdev); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + nand_release(&this->nand); gpmi_free_dma_buffer(this); release_resources(this); @@ -2989,8 +2993,23 @@ static int gpmi_pm_resume(struct device *dev) } #endif /* CONFIG_PM_SLEEP */ +static int __maybe_unused gpmi_runtime_suspend(struct device *dev) +{ + struct gpmi_nand_data *this = dev_get_drvdata(dev); + + return __gpmi_enable_clk(this, false); +} + +static int __maybe_unused gpmi_runtime_resume(struct device *dev) +{ + struct gpmi_nand_data *this = dev_get_drvdata(dev); + + return __gpmi_enable_clk(this, true); +} + static const struct dev_pm_ops gpmi_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume) + SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL) }; static struct platform_driver gpmi_nand_driver = { From d443cb25c0482cf89b7178e94df5665496cab97b Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 21 May 2019 09:06:39 +0200 Subject: [PATCH 33/60] dmaengine: mxs: Drop unnecessary flag The mxs dma driver insists on having the DMA_PREP_INTERRUPT flag set on all but the first transfer. There's no need to let the user set this flag, the driver can do it internally whenever it needs it. Drop handling of this flag from the driver. Signed-off-by: Sascha Hauer Acked-by: Vinod Koul Signed-off-by: Miquel Raynal --- drivers/dma/mxs-dma.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 22cc7f68ef6e..ce92a3626ea4 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -477,16 +477,16 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan) * ...... * ->device_prep_slave_sg(0); * ...... - * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + * ->device_prep_slave_sg(DMA_CTRL_ACK); * ...... * [3] If there are more than two DMA commands in the DMA chain, the code * should be: * ...... * ->device_prep_slave_sg(0); // First * ...... - * ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]); + * ->device_prep_slave_sg(DMA_CTRL_ACK]); * ...... 
- * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last + * ->device_prep_slave_sg(DMA_CTRL_ACK); // Last * ...... */ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( @@ -500,13 +500,12 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( struct scatterlist *sg; u32 i, j; u32 *pio; - bool append = flags & DMA_PREP_INTERRUPT; - int idx = append ? mxs_chan->desc_count : 0; + int idx = 0; - if (mxs_chan->status == DMA_IN_PROGRESS && !append) - return NULL; + if (mxs_chan->status == DMA_IN_PROGRESS) + idx = mxs_chan->desc_count; - if (sg_len + (append ? idx : 0) > NUM_CCW) { + if (sg_len + idx > NUM_CCW) { dev_err(mxs_dma->dma_device.dev, "maximum number of sg exceeded: %d > %d\n", sg_len, NUM_CCW); @@ -520,7 +519,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( * If the sg is prepared with append flag set, the sg * will be appended to the last prepared sg. */ - if (append) { + if (idx) { BUG_ON(idx < 1); ccw = &mxs_chan->ccw[idx - 1]; ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; From 475345e89ced298afbeefed6c86122bb8415593d Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 21 May 2019 09:06:40 +0200 Subject: [PATCH 34/60] mtd: rawnand: gpmi: drop unnecessary flag The DMA_PREP_INTERRUPT flag is no longer needed by the mxs DMA driver, drop it. Signed-off-by: Sascha Hauer Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 0c943ed42c85..74c4de0b1a3d 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -926,8 +926,7 @@ static int gpmi_send_command(struct gpmi_nand_data *this) sg_init_one(sgl, this->cmd_buffer, this->command_length); dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE); desc = dmaengine_prep_slave_sg(channel, - sgl, 1, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + sgl, 1, DMA_MEM_TO_DEV, DMA_CTRL_ACK); if (!desc) return -EINVAL; @@ -997,8 +996,7 @@ static int gpmi_send_data(struct gpmi_nand_data *this, const void *buf, int len) /* [2] send DMA request */ prepare_data_dma(this, buf, len, DMA_TO_DEVICE); desc = dmaengine_prep_slave_sg(channel, &this->data_sgl, - 1, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + 1, DMA_MEM_TO_DEV, DMA_CTRL_ACK); if (!desc) return -EINVAL; @@ -1036,8 +1034,7 @@ static int gpmi_read_data(struct gpmi_nand_data *this, void *buf, int len) /* [2] : send DMA request */ direct = prepare_data_dma(this, buf, len, DMA_FROM_DEVICE); desc = dmaengine_prep_slave_sg(channel, &this->data_sgl, - 1, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + 1, DMA_DEV_TO_MEM, DMA_CTRL_ACK); if (!desc) return -EINVAL; @@ -1150,7 +1147,7 @@ static int gpmi_read_page(struct gpmi_nand_data *this, dma_addr_t payload, desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio, ARRAY_SIZE(pio), DMA_TRANS_NONE, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + DMA_CTRL_ACK); if (!desc) return -EINVAL; @@ -1168,8 +1165,7 @@ static int gpmi_read_page(struct gpmi_nand_data *this, dma_addr_t payload, pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. 
*/ desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio, 3, - DMA_TRANS_NONE, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + DMA_TRANS_NONE, DMA_CTRL_ACK); if (!desc) return -EINVAL; From e0ddaab76802d3179013f4864535043e2aea6c69 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 21 May 2019 09:06:41 +0200 Subject: [PATCH 35/60] dmaengine: mxs: Add header file to be shared with gpmi nand driver The mxs dma driver can do PIO transfers. A pointer to the PIO words to transfer is passed in the struct scatterlist * argument of dmaengine_prep_slave_sg(). It's quite ugly and non obvious to cast u32 * to struct scatterlist * each time when calling dmaengine_prep_slave_sg(), so add a static inline wrapper function to be called by the user along with a description what is going on. Signed-off-by: Sascha Hauer Acked-by: Vinod Koul Signed-off-by: Miquel Raynal --- drivers/dma/mxs-dma.c | 1 + drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 36 +++++++++------------- include/linux/dma/mxs-dma.h | 21 +++++++++++++ 3 files changed, 36 insertions(+), 22 deletions(-) create mode 100644 include/linux/dma/mxs-dma.h diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index ce92a3626ea4..62ee9328aea1 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -24,6 +24,7 @@ #include #include #include +#include #include diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 74c4de0b1a3d..45c7b91aae23 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "gpmi-nand.h" #include "gpmi-regs.h" #include "bch-regs.h" @@ -914,9 +915,8 @@ static int gpmi_send_command(struct gpmi_nand_data *this) | BM_GPMI_CTRL0_ADDRESS_INCREMENT | BF_GPMI_CTRL0_XFER_COUNT(this->command_length); pio[1] = pio[2] = 0; - desc = dmaengine_prep_slave_sg(channel, - (struct scatterlist *)pio, - ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); + desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio), + DMA_TRANS_NONE, 0); if (!desc) return -EINVAL; @@ -988,8 +988,8 @@ static int gpmi_send_data(struct gpmi_nand_data *this, const void *buf, int len) | BF_GPMI_CTRL0_ADDRESS(address) | BF_GPMI_CTRL0_XFER_COUNT(len); pio[1] = 0; - desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio, - ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); + desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio), + DMA_TRANS_NONE, 0); if (!desc) return -EINVAL; @@ -1025,9 +1025,8 @@ static int gpmi_read_data(struct gpmi_nand_data *this, void *buf, int len) | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA) | BF_GPMI_CTRL0_XFER_COUNT(len); pio[1] = 0; - desc = dmaengine_prep_slave_sg(channel, - (struct scatterlist *)pio, - ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); + desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio), + DMA_TRANS_NONE, 0); if (!desc) return -EINVAL; @@ -1083,10 +1082,8 @@ static int gpmi_send_page(struct gpmi_nand_data *this, dma_addr_t payload, pio[4] = payload; pio[5] = auxiliary; - desc = dmaengine_prep_slave_sg(channel, - (struct scatterlist *)pio, - ARRAY_SIZE(pio), DMA_TRANS_NONE, - DMA_CTRL_ACK); + desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio), + DMA_TRANS_NONE, DMA_CTRL_ACK); if (!desc) return -EINVAL; @@ -1117,9 +1114,7 @@ static int gpmi_read_page(struct gpmi_nand_data *this, dma_addr_t payload, | BF_GPMI_CTRL0_ADDRESS(address) | BF_GPMI_CTRL0_XFER_COUNT(0); pio[1] = 0; - desc = dmaengine_prep_slave_sg(channel, - (struct scatterlist *)pio, 2, 
- DMA_TRANS_NONE, 0); + desc = mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE, 0); if (!desc) return -EINVAL; @@ -1144,10 +1139,8 @@ static int gpmi_read_page(struct gpmi_nand_data *this, dma_addr_t payload, pio[3] = geo->page_size; pio[4] = payload; pio[5] = auxiliary; - desc = dmaengine_prep_slave_sg(channel, - (struct scatterlist *)pio, - ARRAY_SIZE(pio), DMA_TRANS_NONE, - DMA_CTRL_ACK); + desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio), + DMA_TRANS_NONE, DMA_CTRL_ACK); if (!desc) return -EINVAL; @@ -1163,9 +1156,8 @@ static int gpmi_read_page(struct gpmi_nand_data *this, dma_addr_t payload, | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size); pio[1] = 0; pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */ - desc = dmaengine_prep_slave_sg(channel, - (struct scatterlist *)pio, 3, - DMA_TRANS_NONE, DMA_CTRL_ACK); + desc = mxs_dmaengine_prep_pio(channel, pio, 3, DMA_TRANS_NONE, + DMA_CTRL_ACK); if (!desc) return -EINVAL; diff --git a/include/linux/dma/mxs-dma.h b/include/linux/dma/mxs-dma.h new file mode 100644 index 000000000000..092b2a7b92ac --- /dev/null +++ b/include/linux/dma/mxs-dma.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _MXS_DMA_H_ +#define _MXS_DMA_H_ + +#include + +/* + * The mxs dmaengine can do PIO transfers. We pass a pointer to the PIO words + * in the second argument to dmaengine_prep_slave_sg when the direction is + * set to DMA_TRANS_NONE. To make this clear and to prevent users from doing + * the error prone casting we have this wrapper function + */ +static inline struct dma_async_tx_descriptor *mxs_dmaengine_prep_pio( + struct dma_chan *chan, u32 *pio, unsigned int npio, + enum dma_transfer_direction dir, unsigned long flags) +{ + return dmaengine_prep_slave_sg(chan, (struct scatterlist *)pio, npio, + dir, flags); +} + +#endif /* _MXS_DMA_H_ */ From ceeeb99cd821a2f7493e1e0e1eca5afc7a205213 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 21 May 2019 09:06:42 +0200 Subject: [PATCH 36/60] dmaengine: mxs: rename custom flag The mxs dma driver uses the flags parameter in dmaengine_prep_slave_sg() for custom flags, but still uses the dmaengine specific names of the flags. Do a little bit better and at least give the flag a custom name. 
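
As a reference, a minimal sketch of how a client driver is expected to use the new wrapper together with the renamed flag. The helper name example_send_pio() and the PIO word values are made up for illustration; only mxs_dmaengine_prep_pio() and MXS_DMA_CTRL_WAIT4END come from the patches above:

  #include <linux/kernel.h>
  #include <linux/dmaengine.h>
  #include <linux/dma/mxs-dma.h>

  /* Issue a single PIO-only descriptor on an mxs DMA channel. */
  static int example_send_pio(struct dma_chan *chan, u32 ctrl0)
  {
          struct dma_async_tx_descriptor *desc;
          /* PIO words written to the peripheral registers, CTRL0 first. */
          u32 pio[3] = { ctrl0, 0, 0 };

          desc = mxs_dmaengine_prep_pio(chan, pio, ARRAY_SIZE(pio),
                                        DMA_TRANS_NONE,
                                        MXS_DMA_CTRL_WAIT4END);
          if (!desc)
                  return -EINVAL;

          dmaengine_submit(desc);
          dma_async_issue_pending(chan);

          return 0;
  }

The point of the custom flag is that the caller now states the mxs-specific "wait for end of command" behaviour explicitly instead of overloading the generic DMA_CTRL_ACK semantics.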
Signed-off-by: Sascha Hauer Acked-by: Vinod Koul Signed-off-by: Miquel Raynal --- drivers/dma/mxs-dma.c | 4 ++-- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 17 +++++++++++------ include/linux/dma/mxs-dma.h | 2 ++ 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 62ee9328aea1..c622bee7eb12 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -541,7 +541,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( ccw->bits = 0; ccw->bits |= CCW_IRQ; ccw->bits |= CCW_DEC_SEM; - if (flags & DMA_CTRL_ACK) + if (flags & MXS_DMA_CTRL_WAIT4END) ccw->bits |= CCW_WAIT4END; ccw->bits |= CCW_HALT_ON_TERM; ccw->bits |= CCW_TERM_FLUSH; @@ -573,7 +573,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( ccw->bits &= ~CCW_CHAIN; ccw->bits |= CCW_IRQ; ccw->bits |= CCW_DEC_SEM; - if (flags & DMA_CTRL_ACK) + if (flags & MXS_DMA_CTRL_WAIT4END) ccw->bits |= CCW_WAIT4END; } } diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 45c7b91aae23..d088b3e77fef 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -926,7 +926,8 @@ static int gpmi_send_command(struct gpmi_nand_data *this) sg_init_one(sgl, this->cmd_buffer, this->command_length); dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE); desc = dmaengine_prep_slave_sg(channel, - sgl, 1, DMA_MEM_TO_DEV, DMA_CTRL_ACK); + sgl, 1, DMA_MEM_TO_DEV, + MXS_DMA_CTRL_WAIT4END); if (!desc) return -EINVAL; @@ -996,7 +997,8 @@ static int gpmi_send_data(struct gpmi_nand_data *this, const void *buf, int len) /* [2] send DMA request */ prepare_data_dma(this, buf, len, DMA_TO_DEVICE); desc = dmaengine_prep_slave_sg(channel, &this->data_sgl, - 1, DMA_MEM_TO_DEV, DMA_CTRL_ACK); + 1, DMA_MEM_TO_DEV, + MXS_DMA_CTRL_WAIT4END); if (!desc) return -EINVAL; @@ -1033,7 +1035,8 @@ static int gpmi_read_data(struct gpmi_nand_data *this, void *buf, int len) /* [2] : send DMA request */ direct = prepare_data_dma(this, buf, len, DMA_FROM_DEVICE); desc = dmaengine_prep_slave_sg(channel, &this->data_sgl, - 1, DMA_DEV_TO_MEM, DMA_CTRL_ACK); + 1, DMA_DEV_TO_MEM, + MXS_DMA_CTRL_WAIT4END); if (!desc) return -EINVAL; @@ -1083,7 +1086,8 @@ static int gpmi_send_page(struct gpmi_nand_data *this, dma_addr_t payload, pio[5] = auxiliary; desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio), - DMA_TRANS_NONE, DMA_CTRL_ACK); + DMA_TRANS_NONE, + MXS_DMA_CTRL_WAIT4END); if (!desc) return -EINVAL; @@ -1140,7 +1144,8 @@ static int gpmi_read_page(struct gpmi_nand_data *this, dma_addr_t payload, pio[4] = payload; pio[5] = auxiliary; desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio), - DMA_TRANS_NONE, DMA_CTRL_ACK); + DMA_TRANS_NONE, + MXS_DMA_CTRL_WAIT4END); if (!desc) return -EINVAL; @@ -1157,7 +1162,7 @@ static int gpmi_read_page(struct gpmi_nand_data *this, dma_addr_t payload, pio[1] = 0; pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */ desc = mxs_dmaengine_prep_pio(channel, pio, 3, DMA_TRANS_NONE, - DMA_CTRL_ACK); + MXS_DMA_CTRL_WAIT4END); if (!desc) return -EINVAL; diff --git a/include/linux/dma/mxs-dma.h b/include/linux/dma/mxs-dma.h index 092b2a7b92ac..4a33f2c8a682 100644 --- a/include/linux/dma/mxs-dma.h +++ b/include/linux/dma/mxs-dma.h @@ -4,6 +4,8 @@ #include +#define MXS_DMA_CTRL_WAIT4END BIT(31) + /* * The mxs dmaengine can do PIO transfers. 
We pass a pointer to the PIO words * in the second argument to dmaengine_prep_slave_sg when the direction is From ef347c0cfd619a9251e5a2f9ff72e33650a9bccb Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 21 May 2019 09:06:43 +0200 Subject: [PATCH 37/60] mtd: rawnand: gpmi: Implement exec_op The gpmi driver performance suffers from NAND operations being split in multiple small DMA transfers. This has been forced by the NAND layer in the former days, but now with exec_op we can use the controller as intended. With this patch gpmi_nfc_exec_op becomes the main entry point to NAND operations. Here all instructions are collected and chained as separate DMA transfers. In the end whole chain is fired and waited to be finished. gpmi_nfc_exec_op only does the hardware operations, bad block marker swapping and buffer scrambling is done by the callers. It's worth noting that the nand_*_op functions always take the buffer lengths for the data that the NAND chip actually transfers. When doing BCH we have to calculate the net data size from the raw data size in some places. This patch has been tested with 2048/64 and 2048/128 byte NAND on i.MX6q. mtd_oobtest, mtd_subpagetest and mtd_speedtest run without errors. nandbiterrs, nandpagetest and nandsubpagetest userspace tests from mtdutils run without errors and UBIFS can successfully be mounted. Signed-off-by: Sascha Hauer Signed-off-by: Miquel Raynal --- drivers/dma/mxs-dma.c | 3 + drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 1105 ++++++++------------ drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h | 25 +- include/linux/dma/mxs-dma.h | 1 + 4 files changed, 444 insertions(+), 690 deletions(-) diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index c622bee7eb12..20a9cb7cb6d3 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -78,6 +78,7 @@ #define BM_CCW_COMMAND (3 << 0) #define CCW_CHAIN (1 << 2) #define CCW_IRQ (1 << 3) +#define CCW_WAIT4RDY (1 << 5) #define CCW_DEC_SEM (1 << 6) #define CCW_WAIT4END (1 << 7) #define CCW_HALT_ON_TERM (1 << 8) @@ -547,6 +548,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( ccw->bits |= CCW_TERM_FLUSH; ccw->bits |= BF_CCW(sg_len, PIO_NUM); ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND); + if (flags & MXS_DMA_CTRL_WAIT4RDY) + ccw->bits |= CCW_WAIT4RDY; } else { for_each_sg(sgl, sg, sg_len, i) { if (sg_dma_len(sg) > MAX_XFER_BYTES) { diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index d088b3e77fef..5db84178edff 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -528,26 +528,12 @@ static int common_nfc_set_geometry(struct gpmi_nand_data *this) static int bch_set_geometry(struct gpmi_nand_data *this) { struct resources *r = &this->resources; - struct bch_geometry *bch_geo = &this->bch_geometry; - unsigned int block_count; - unsigned int block_size; - unsigned int metadata_size; - unsigned int ecc_strength; - unsigned int page_size; - unsigned int gf_len; int ret; ret = common_nfc_set_geometry(this); if (ret) return ret; - block_count = bch_geo->ecc_chunk_count - 1; - block_size = bch_geo->ecc_chunk_size; - metadata_size = bch_geo->metadata_size; - ecc_strength = bch_geo->ecc_strength >> 1; - page_size = bch_geo->page_size; - gf_len = bch_geo->gf_len; - ret = pm_runtime_get_sync(this->dev); if (ret < 0) return ret; @@ -561,27 +547,9 @@ static int bch_set_geometry(struct gpmi_nand_data *this) if (ret) goto err_out; - /* Configure layout 0. 
*/ - writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count) - | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size) - | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) - | BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) - | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this), - r->bch_regs + HW_BCH_FLASH0LAYOUT0); - - writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) - | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) - | BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) - | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this), - r->bch_regs + HW_BCH_FLASH0LAYOUT1); - /* Set *all* chip selects to use layout 0. */ writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT); - /* Enable interrupts. */ - writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, - r->bch_regs + HW_BCH_CTRL_SET); - ret = 0; err_out: pm_runtime_mark_last_busy(this->dev); @@ -795,32 +763,6 @@ static void gpmi_clear_bch(struct gpmi_nand_data *this) writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR); } -/* Returns the Ready/Busy status of the given chip. */ -static int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip) -{ - struct resources *r = &this->resources; - uint32_t mask = 0; - uint32_t reg = 0; - - if (GPMI_IS_MX23(this)) { - mask = MX23_BM_GPMI_DEBUG_READY0 << chip; - reg = readl(r->gpmi_regs + HW_GPMI_DEBUG); - } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6(this)) { - /* - * In the imx6, all the ready/busy pins are bound - * together. So we only need to check chip 0. - */ - if (GPMI_IS_MX6(this)) - chip = 0; - - /* MX28 shares the same R/B register as MX6Q. */ - mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip); - reg = readl(r->gpmi_regs + HW_GPMI_STAT); - } else - dev_err(this->dev, "unknown arch.\n"); - return reg & mask; -} - static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this) { /* We use the DMA channel 0 to access all the nand chips. */ @@ -836,29 +778,6 @@ static void dma_irq_callback(void *param) complete(dma_c); } -static int start_dma_without_bch_irq(struct gpmi_nand_data *this, - struct dma_async_tx_descriptor *desc) -{ - struct completion *dma_c = &this->dma_done; - unsigned long timeout; - - init_completion(dma_c); - - desc->callback = dma_irq_callback; - desc->callback_param = this; - dmaengine_submit(desc); - dma_async_issue_pending(get_dma_chan(this)); - - /* Wait for the interrupt from the DMA block. */ - timeout = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000)); - if (!timeout) { - dev_err(this->dev, "DMA timeout, last DMA\n"); - gpmi_dump_info(this); - return -ETIMEDOUT; - } - return 0; -} - static irqreturn_t bch_irq(int irq, void *cookie) { struct gpmi_nand_data *this = cookie; @@ -868,83 +787,25 @@ static irqreturn_t bch_irq(int irq, void *cookie) return IRQ_HANDLED; } -/* - * This function is used in BCH reading or BCH writing pages. - * It will wait for the BCH interrupt as long as ONE second. - * Actually, we must wait for two interrupts : - * [1] firstly the DMA interrupt and - * [2] secondly the BCH interrupt. - */ -static int start_dma_with_bch_irq(struct gpmi_nand_data *this, - struct dma_async_tx_descriptor *desc) +static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len) { - struct completion *bch_c = &this->bch_done; - unsigned long timeout; - - /* Prepare to receive an interrupt from the BCH block. */ - init_completion(bch_c); - - /* start the DMA */ - start_dma_without_bch_irq(this, desc); - - /* Wait for the interrupt from the BCH block. 
*/ - timeout = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000)); - if (!timeout) { - dev_err(this->dev, "BCH timeout\n"); - gpmi_dump_info(this); - return -ETIMEDOUT; - } - return 0; -} - -static int gpmi_send_command(struct gpmi_nand_data *this) -{ - struct dma_chan *channel = get_dma_chan(this); - struct dma_async_tx_descriptor *desc; - struct scatterlist *sgl; - int chip = this->current_chip; - int ret; - u32 pio[3]; - - /* [1] send out the PIO words */ - pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE) - | BM_GPMI_CTRL0_WORD_LENGTH - | BF_GPMI_CTRL0_CS(chip, this) - | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) - | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE) - | BM_GPMI_CTRL0_ADDRESS_INCREMENT - | BF_GPMI_CTRL0_XFER_COUNT(this->command_length); - pio[1] = pio[2] = 0; - desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio), - DMA_TRANS_NONE, 0); - if (!desc) - return -EINVAL; - - /* [2] send out the COMMAND + ADDRESS string stored in @buffer */ - sgl = &this->cmd_sgl; - - sg_init_one(sgl, this->cmd_buffer, this->command_length); - dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE); - desc = dmaengine_prep_slave_sg(channel, - sgl, 1, DMA_MEM_TO_DEV, - MXS_DMA_CTRL_WAIT4END); - if (!desc) - return -EINVAL; - - /* [3] submit the DMA */ - ret = start_dma_without_bch_irq(this, desc); - - dma_unmap_sg(this->dev, sgl, 1, DMA_TO_DEVICE); - - return ret; + /* + * raw_len is the length to read/write including bch data which + * we are passed in exec_op. Calculate the data length from it. + */ + if (this->bch) + return ALIGN_DOWN(raw_len, this->bch_geometry.ecc_chunk_size); + else + return raw_len; } /* Can we use the upper's buffer directly for DMA? */ static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf, - int len, enum dma_data_direction dr) + int raw_len, struct scatterlist *sgl, + enum dma_data_direction dr) { - struct scatterlist *sgl = &this->data_sgl; int ret; + int len = gpmi_raw_len_to_len(this, raw_len); /* first try to map the upper buffer directly */ if (virt_addr_valid(buf) && !object_is_on_stack(buf)) { @@ -960,7 +821,7 @@ map_fail: /* We have to use our own DMA buffer. 
*/ sg_init_one(sgl, this->data_buffer_dma, len); - if (dr == DMA_TO_DEVICE) + if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma) memcpy(this->data_buffer_dma, buf, len); dma_map_sg(this->dev, sgl, 1, dr); @@ -968,208 +829,6 @@ map_fail: return false; } -static int gpmi_send_data(struct gpmi_nand_data *this, const void *buf, int len) -{ - struct dma_async_tx_descriptor *desc; - struct dma_chan *channel = get_dma_chan(this); - int chip = this->current_chip; - int ret; - uint32_t command_mode; - uint32_t address; - u32 pio[2]; - - /* [1] PIO */ - command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE; - address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; - - pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) - | BM_GPMI_CTRL0_WORD_LENGTH - | BF_GPMI_CTRL0_CS(chip, this) - | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) - | BF_GPMI_CTRL0_ADDRESS(address) - | BF_GPMI_CTRL0_XFER_COUNT(len); - pio[1] = 0; - desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio), - DMA_TRANS_NONE, 0); - if (!desc) - return -EINVAL; - - /* [2] send DMA request */ - prepare_data_dma(this, buf, len, DMA_TO_DEVICE); - desc = dmaengine_prep_slave_sg(channel, &this->data_sgl, - 1, DMA_MEM_TO_DEV, - MXS_DMA_CTRL_WAIT4END); - if (!desc) - return -EINVAL; - - /* [3] submit the DMA */ - ret = start_dma_without_bch_irq(this, desc); - - dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE); - - return ret; -} - -static int gpmi_read_data(struct gpmi_nand_data *this, void *buf, int len) -{ - struct dma_async_tx_descriptor *desc; - struct dma_chan *channel = get_dma_chan(this); - int chip = this->current_chip; - int ret; - u32 pio[2]; - bool direct; - - /* [1] : send PIO */ - pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ) - | BM_GPMI_CTRL0_WORD_LENGTH - | BF_GPMI_CTRL0_CS(chip, this) - | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) - | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA) - | BF_GPMI_CTRL0_XFER_COUNT(len); - pio[1] = 0; - desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio), - DMA_TRANS_NONE, 0); - if (!desc) - return -EINVAL; - - /* [2] : send DMA request */ - direct = prepare_data_dma(this, buf, len, DMA_FROM_DEVICE); - desc = dmaengine_prep_slave_sg(channel, &this->data_sgl, - 1, DMA_DEV_TO_MEM, - MXS_DMA_CTRL_WAIT4END); - if (!desc) - return -EINVAL; - - /* [3] : submit the DMA */ - - ret = start_dma_without_bch_irq(this, desc); - - dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE); - if (!direct) - memcpy(buf, this->data_buffer_dma, len); - - return ret; -} - -static int gpmi_send_page(struct gpmi_nand_data *this, dma_addr_t payload, - dma_addr_t auxiliary) -{ - struct bch_geometry *geo = &this->bch_geometry; - uint32_t command_mode; - uint32_t address; - uint32_t ecc_command; - uint32_t buffer_mask; - struct dma_async_tx_descriptor *desc; - struct dma_chan *channel = get_dma_chan(this); - int chip = this->current_chip; - u32 pio[6]; - - /* A DMA descriptor that does an ECC page read. 
*/ - command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE; - address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; - ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE; - buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE | - BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY; - - pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) - | BM_GPMI_CTRL0_WORD_LENGTH - | BF_GPMI_CTRL0_CS(chip, this) - | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) - | BF_GPMI_CTRL0_ADDRESS(address) - | BF_GPMI_CTRL0_XFER_COUNT(0); - pio[1] = 0; - pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC - | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command) - | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask); - pio[3] = geo->page_size; - pio[4] = payload; - pio[5] = auxiliary; - - desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio), - DMA_TRANS_NONE, - MXS_DMA_CTRL_WAIT4END); - if (!desc) - return -EINVAL; - - return start_dma_with_bch_irq(this, desc); -} - -static int gpmi_read_page(struct gpmi_nand_data *this, dma_addr_t payload, - dma_addr_t auxiliary) -{ - struct bch_geometry *geo = &this->bch_geometry; - uint32_t command_mode; - uint32_t address; - uint32_t ecc_command; - uint32_t buffer_mask; - struct dma_async_tx_descriptor *desc; - struct dma_chan *channel = get_dma_chan(this); - int chip = this->current_chip; - u32 pio[6]; - - /* [1] Wait for the chip to report ready. */ - command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY; - address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; - - pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) - | BM_GPMI_CTRL0_WORD_LENGTH - | BF_GPMI_CTRL0_CS(chip, this) - | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) - | BF_GPMI_CTRL0_ADDRESS(address) - | BF_GPMI_CTRL0_XFER_COUNT(0); - pio[1] = 0; - desc = mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE, 0); - if (!desc) - return -EINVAL; - - /* [2] Enable the BCH block and read. */ - command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ; - address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; - ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE; - buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE - | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY; - - pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) - | BM_GPMI_CTRL0_WORD_LENGTH - | BF_GPMI_CTRL0_CS(chip, this) - | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) - | BF_GPMI_CTRL0_ADDRESS(address) - | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size); - - pio[1] = 0; - pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC - | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command) - | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask); - pio[3] = geo->page_size; - pio[4] = payload; - pio[5] = auxiliary; - desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio), - DMA_TRANS_NONE, - MXS_DMA_CTRL_WAIT4END); - if (!desc) - return -EINVAL; - - /* [3] Disable the BCH block */ - command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY; - address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA; - - pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode) - | BM_GPMI_CTRL0_WORD_LENGTH - | BF_GPMI_CTRL0_CS(chip, this) - | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) - | BF_GPMI_CTRL0_ADDRESS(address) - | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size); - pio[1] = 0; - pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. 
*/ - desc = mxs_dmaengine_prep_pio(channel, pio, 3, DMA_TRANS_NONE, - MXS_DMA_CTRL_WAIT4END); - if (!desc) - return -EINVAL; - - /* [4] submit the DMA */ - return start_dma_with_bch_irq(this, desc); -} - /** * gpmi_copy_bits - copy bits from one memory region to another * @dst: destination buffer @@ -1568,67 +1227,20 @@ static void release_resources(struct gpmi_nand_data *this) release_dma_channels(this); } -static int send_page_prepare(struct gpmi_nand_data *this, - const void *source, unsigned length, - void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, - const void **use_virt, dma_addr_t *use_phys) -{ - struct device *dev = this->dev; - - if (virt_addr_valid(source)) { - dma_addr_t source_phys; - - source_phys = dma_map_single(dev, (void *)source, length, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, source_phys)) { - if (alt_size < length) { - dev_err(dev, "Alternate buffer is too small\n"); - return -ENOMEM; - } - goto map_failed; - } - *use_virt = source; - *use_phys = source_phys; - return 0; - } -map_failed: - /* - * Copy the content of the source buffer into the alternate - * buffer and set up the return values accordingly. - */ - memcpy(alt_virt, source, length); - - *use_virt = alt_virt; - *use_phys = alt_phys; - return 0; -} - -static void send_page_end(struct gpmi_nand_data *this, - const void *source, unsigned length, - void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, - const void *used_virt, dma_addr_t used_phys) -{ - struct device *dev = this->dev; - if (used_virt == source) - dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE); -} - static void gpmi_free_dma_buffer(struct gpmi_nand_data *this) { struct device *dev = this->dev; + struct bch_geometry *geo = &this->bch_geometry; - if (this->payload_virt && virt_addr_valid(this->payload_virt)) - dma_free_coherent(dev, this->page_buffer_size, - this->payload_virt, - this->payload_phys); - kfree(this->cmd_buffer); + if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt)) + dma_free_coherent(dev, geo->auxiliary_size, + this->auxiliary_virt, + this->auxiliary_phys); kfree(this->data_buffer_dma); kfree(this->raw_buffer); - this->cmd_buffer = NULL; this->data_buffer_dma = NULL; this->raw_buffer = NULL; - this->page_buffer_size = 0; } /* Allocate the DMA buffers */ @@ -1638,11 +1250,6 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this) struct device *dev = this->dev; struct mtd_info *mtd = nand_to_mtd(&this->nand); - /* [1] Allocate a command buffer. PAGE_SIZE is enough. */ - this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL); - if (this->cmd_buffer == NULL) - goto error_alloc; - /* * [2] Allocate a read/write data buffer. * The gpmi_alloc_dma_buffer can be called twice. @@ -1656,27 +1263,15 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this) if (this->data_buffer_dma == NULL) goto error_alloc; - /* - * [3] Allocate the page buffer. - * - * Both the payload buffer and the auxiliary buffer must appear on - * 32-bit boundaries. We presume the size of the payload buffer is a - * power of two and is much larger than four, which guarantees the - * auxiliary buffer will appear on a 32-bit boundary. 
- */ - this->page_buffer_size = geo->payload_size + geo->auxiliary_size; - this->payload_virt = dma_alloc_coherent(dev, this->page_buffer_size, - &this->payload_phys, GFP_DMA); - if (!this->payload_virt) + this->auxiliary_virt = dma_alloc_coherent(dev, geo->auxiliary_size, + &this->auxiliary_phys, GFP_DMA); + if (!this->auxiliary_virt) goto error_alloc; - this->raw_buffer = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL); + this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL); if (!this->raw_buffer) goto error_alloc; - /* Slice up the page buffer. */ - this->auxiliary_virt = this->payload_virt + geo->payload_size; - this->auxiliary_phys = this->payload_phys + geo->payload_size; return 0; error_alloc: @@ -1684,105 +1279,6 @@ error_alloc: return -ENOMEM; } -static void gpmi_cmd_ctrl(struct nand_chip *chip, int data, unsigned int ctrl) -{ - struct gpmi_nand_data *this = nand_get_controller_data(chip); - int ret; - - /* - * Every operation begins with a command byte and a series of zero or - * more address bytes. These are distinguished by either the Address - * Latch Enable (ALE) or Command Latch Enable (CLE) signals being - * asserted. When MTD is ready to execute the command, it will deassert - * both latch enables. - * - * Rather than run a separate DMA operation for every single byte, we - * queue them up and run a single DMA operation for the entire series - * of command and data bytes. NAND_CMD_NONE means the END of the queue. - */ - if ((ctrl & (NAND_ALE | NAND_CLE))) { - if (data != NAND_CMD_NONE) - this->cmd_buffer[this->command_length++] = data; - return; - } - - if (!this->command_length) - return; - - ret = gpmi_send_command(this); - if (ret) - dev_err(this->dev, "Chip: %u, Error %d\n", - this->current_chip, ret); - - this->command_length = 0; -} - -static int gpmi_dev_ready(struct nand_chip *chip) -{ - struct gpmi_nand_data *this = nand_get_controller_data(chip); - - return gpmi_is_ready(this, this->current_chip); -} - -static void gpmi_select_chip(struct nand_chip *chip, int chipnr) -{ - struct gpmi_nand_data *this = nand_get_controller_data(chip); - int ret; - - /* - * For power consumption matters, disable/enable the clock each time a - * die is selected/unselected. - */ - if (this->current_chip < 0 && chipnr >= 0) { - ret = pm_runtime_get_sync(this->dev); - if (ret < 0) - dev_err(this->dev, "Failed to enable the clock\n"); - } else if (this->current_chip >= 0 && chipnr < 0) { - pm_runtime_mark_last_busy(this->dev); - pm_runtime_put_autosuspend(this->dev); - } - - /* - * This driver currently supports only one NAND chip. Plus, dies share - * the same configuration. So once timings have been applied on the - * controller side, they will not change anymore. When the time will - * come, the check on must_apply_timings will have to be dropped. 
- */ - if (chipnr >= 0 && this->hw.must_apply_timings) { - this->hw.must_apply_timings = false; - gpmi_nfc_apply_timings(this); - } - - this->current_chip = chipnr; -} - -static void gpmi_read_buf(struct nand_chip *chip, uint8_t *buf, int len) -{ - struct gpmi_nand_data *this = nand_get_controller_data(chip); - - dev_dbg(this->dev, "len is %d\n", len); - - gpmi_read_data(this, buf, len); -} - -static void gpmi_write_buf(struct nand_chip *chip, const uint8_t *buf, int len) -{ - struct gpmi_nand_data *this = nand_get_controller_data(chip); - - dev_dbg(this->dev, "len is %d\n", len); - - gpmi_send_data(this, buf, len); -} - -static uint8_t gpmi_read_byte(struct nand_chip *chip) -{ - struct gpmi_nand_data *this = nand_get_controller_data(chip); - uint8_t *buf = this->data_buffer_dma; - - gpmi_read_buf(chip, buf, 1); - return buf[0]; -} - /* * Handles block mark swapping. * It can be called in swapping the block mark, or swapping it back, @@ -1831,50 +1327,20 @@ static void block_mark_swapping(struct gpmi_nand_data *this, p[1] = (p[1] & mask) | (from_oob >> (8 - bit)); } -static int gpmi_ecc_read_page_data(struct nand_chip *chip, uint8_t *buf) +static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first, + int last, int meta) { struct gpmi_nand_data *this = nand_get_controller_data(chip); struct bch_geometry *nfc_geo = &this->bch_geometry; struct mtd_info *mtd = nand_to_mtd(chip); - dma_addr_t payload_phys; - unsigned int i; + int i; unsigned char *status; - unsigned int max_bitflips = 0; - int ret; - bool direct = false; - - payload_phys = this->payload_phys; - - if (virt_addr_valid(buf)) { - dma_addr_t dest_phys; - - dest_phys = dma_map_single(this->dev, buf, nfc_geo->payload_size, - DMA_FROM_DEVICE); - if (!dma_mapping_error(this->dev, dest_phys)) { - payload_phys = dest_phys; - direct = true; - } - } - - /* go! */ - ret = gpmi_read_page(this, payload_phys, this->auxiliary_phys); - - if (direct) - dma_unmap_single(this->dev, payload_phys, nfc_geo->payload_size, - DMA_FROM_DEVICE); - - if (ret) { - dev_err(this->dev, "Error in ECC-based read: %d\n", ret); - return ret; - } + unsigned int max_bitflips = 0; /* Loop over status bytes, accumulating ECC status. 
*/ - status = this->auxiliary_virt + nfc_geo->auxiliary_status_offset; + status = this->auxiliary_virt + ALIGN(meta, 4); - if (!direct) - memcpy(buf, this->payload_virt, nfc_geo->payload_size); - - for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) { + for (i = first; i < last; i++, status++) { if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED)) continue; @@ -1954,25 +1420,53 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip, uint8_t *buf) max_bitflips = max_t(unsigned int, max_bitflips, *status); } - /* handle the block mark swapping */ - block_mark_swapping(this, buf, this->auxiliary_virt); - return max_bitflips; } +static void gpmi_bch_layout_std(struct gpmi_nand_data *this) +{ + struct bch_geometry *geo = &this->bch_geometry; + unsigned int ecc_strength = geo->ecc_strength >> 1; + unsigned int gf_len = geo->gf_len; + unsigned int block_size = block_size = geo->ecc_chunk_size; + + this->bch_flashlayout0 = + BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) | + BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) | + BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) | + BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) | + BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this); + + this->bch_flashlayout1 = + BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) | + BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) | + BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) | + BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this); +} + static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { struct gpmi_nand_data *this = nand_get_controller_data(chip); struct mtd_info *mtd = nand_to_mtd(chip); + struct bch_geometry *geo = &this->bch_geometry; + unsigned int max_bitflips; int ret; - nand_read_page_op(chip, page, 0, NULL, 0); + gpmi_bch_layout_std(this); + this->bch = true; - ret = gpmi_ecc_read_page_data(chip, buf); - if (ret < 0) + ret = nand_read_page_op(chip, page, 0, buf, geo->page_size); + if (ret) return ret; + max_bitflips = gpmi_count_bitflips(chip, buf, 0, + geo->ecc_chunk_count, + geo->auxiliary_status_offset); + + /* handle the block mark swapping */ + block_mark_swapping(this, buf, this->auxiliary_virt); + if (oob_required) { /* * It's time to deliver the OOB bytes. 
See gpmi_ecc_read_oob() @@ -1988,7 +1482,7 @@ static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf, chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0]; } - return ret; + return max_bitflips; } /* Fake a virtual small page for the subpage read */ @@ -1996,17 +1490,15 @@ static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs, uint32_t len, uint8_t *buf, int page) { struct gpmi_nand_data *this = nand_get_controller_data(chip); - void __iomem *bch_regs = this->resources.bch_regs; - struct bch_geometry old_geo = this->bch_geometry; struct bch_geometry *geo = &this->bch_geometry; int size = chip->ecc.size; /* ECC chunk size */ int meta, n, page_size; - u32 r1_old, r2_old, r1_new, r2_new; unsigned int max_bitflips; + unsigned int ecc_strength; int first, last, marker_pos; int ecc_parity_size; int col = 0; - int old_swap_block_mark = this->swap_block_mark; + int ret; /* The size of ECC parity */ ecc_parity_size = geo->gf_len * geo->ecc_strength / 8; @@ -2039,43 +1531,33 @@ static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs, buf = buf + first * size; } - nand_read_page_op(chip, page, col, NULL, 0); + ecc_parity_size = geo->gf_len * geo->ecc_strength / 8; - /* Save the old environment */ - r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0); - r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1); - - /* change the BCH registers and bch_geometry{} */ n = last - first + 1; page_size = meta + (size + ecc_parity_size) * n; + ecc_strength = geo->ecc_strength >> 1; - r1_new &= ~(BM_BCH_FLASH0LAYOUT0_NBLOCKS | - BM_BCH_FLASH0LAYOUT0_META_SIZE); - r1_new |= BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1) - | BF_BCH_FLASH0LAYOUT0_META_SIZE(meta); - writel(r1_new, bch_regs + HW_BCH_FLASH0LAYOUT0); + this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1) | + BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) | + BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) | + BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) | + BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(geo->ecc_chunk_size, this); - r2_new &= ~BM_BCH_FLASH0LAYOUT1_PAGE_SIZE; - r2_new |= BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size); - writel(r2_new, bch_regs + HW_BCH_FLASH0LAYOUT1); + this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) | + BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) | + BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) | + BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->ecc_chunk_size, this); - geo->ecc_chunk_count = n; - geo->payload_size = n * size; - geo->page_size = page_size; - geo->auxiliary_status_offset = ALIGN(meta, 4); + this->bch = true; + + ret = nand_read_page_op(chip, page, col, buf, page_size); + if (ret) + return ret; dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n", page, offs, len, col, first, n, page_size); - /* Read the subpage now */ - this->swap_block_mark = false; - max_bitflips = gpmi_ecc_read_page_data(chip, buf); - - /* Restore */ - writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0); - writel(r2_old, bch_regs + HW_BCH_FLASH0LAYOUT1); - this->bch_geometry = old_geo; - this->swap_block_mark = old_swap_block_mark; + max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta); return max_bitflips; } @@ -2086,81 +1568,29 @@ static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf, struct mtd_info *mtd = nand_to_mtd(chip); struct gpmi_nand_data *this = nand_get_controller_data(chip); struct bch_geometry *nfc_geo = &this->bch_geometry; - const void *payload_virt; - dma_addr_t payload_phys; - const void *auxiliary_virt; - dma_addr_t auxiliary_phys; - int ret; + 
int ret; dev_dbg(this->dev, "ecc write page.\n"); - nand_prog_page_begin_op(chip, page, 0, NULL, 0); + gpmi_bch_layout_std(this); + this->bch = true; + + memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size); if (this->swap_block_mark) { /* - * If control arrives here, we're doing block mark swapping. - * Since we can't modify the caller's buffers, we must copy them - * into our own. + * When doing bad block marker swapping we must always copy the + * input buffer as we can't modify the const buffer. */ - memcpy(this->payload_virt, buf, mtd->writesize); - payload_virt = this->payload_virt; - payload_phys = this->payload_phys; - - memcpy(this->auxiliary_virt, chip->oob_poi, - nfc_geo->auxiliary_size); - auxiliary_virt = this->auxiliary_virt; - auxiliary_phys = this->auxiliary_phys; - - /* Handle block mark swapping. */ - block_mark_swapping(this, - (void *)payload_virt, (void *)auxiliary_virt); - } else { - /* - * If control arrives here, we're not doing block mark swapping, - * so we can to try and use the caller's buffers. - */ - ret = send_page_prepare(this, - buf, mtd->writesize, - this->payload_virt, this->payload_phys, - nfc_geo->payload_size, - &payload_virt, &payload_phys); - if (ret) { - dev_err(this->dev, "Inadequate payload DMA buffer\n"); - return 0; - } - - ret = send_page_prepare(this, - chip->oob_poi, mtd->oobsize, - this->auxiliary_virt, this->auxiliary_phys, - nfc_geo->auxiliary_size, - &auxiliary_virt, &auxiliary_phys); - if (ret) { - dev_err(this->dev, "Inadequate auxiliary DMA buffer\n"); - goto exit_auxiliary; - } + memcpy(this->data_buffer_dma, buf, mtd->writesize); + buf = this->data_buffer_dma; + block_mark_swapping(this, this->data_buffer_dma, + this->auxiliary_virt); } - /* Ask the NFC. */ - ret = gpmi_send_page(this, payload_phys, auxiliary_phys); - if (ret) - dev_err(this->dev, "Error in ECC-based write: %d\n", ret); + ret = nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size); - if (!this->swap_block_mark) { - send_page_end(this, chip->oob_poi, mtd->oobsize, - this->auxiliary_virt, this->auxiliary_phys, - nfc_geo->auxiliary_size, - auxiliary_virt, auxiliary_phys); -exit_auxiliary: - send_page_end(this, buf, mtd->writesize, - this->payload_virt, this->payload_phys, - nfc_geo->payload_size, - payload_virt, payload_phys); - } - - if (ret) - return ret; - - return nand_prog_page_end_op(chip); + return ret; } /* @@ -2229,7 +1659,6 @@ static int gpmi_ecc_read_oob(struct nand_chip *chip, int page) struct gpmi_nand_data *this = nand_get_controller_data(chip); int ret; - dev_dbg(this->dev, "page number is %d\n", page); /* clear the OOB buffer */ memset(chip->oob_poi, ~0, mtd->oobsize); @@ -2297,9 +1726,12 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf, size_t oob_byte_off; uint8_t *oob = chip->oob_poi; int step; + int ret; - nand_read_page_op(chip, page, 0, tmp_buf, - mtd->writesize + mtd->oobsize); + ret = nand_read_page_op(chip, page, 0, tmp_buf, + mtd->writesize + mtd->oobsize); + if (ret) + return ret; /* * If required, swap the bad block marker and the data stored in the @@ -2789,9 +2221,330 @@ static int gpmi_nand_attach_chip(struct nand_chip *chip) return 0; } +static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this) +{ + struct gpmi_transfer *transfer = &this->transfers[this->ntransfers]; + + this->ntransfers++; + + if (this->ntransfers == GPMI_MAX_TRANSFERS) + return NULL; + + return transfer; +} + +static struct dma_async_tx_descriptor *gpmi_chain_command( + struct gpmi_nand_data *this, u8 cmd, 
const u8 *addr, int naddr) +{ + struct dma_chan *channel = get_dma_chan(this); + struct dma_async_tx_descriptor *desc; + struct gpmi_transfer *transfer; + int chip = this->nand.cur_cs; + u32 pio[3]; + + /* [1] send out the PIO words */ + pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE) + | BM_GPMI_CTRL0_WORD_LENGTH + | BF_GPMI_CTRL0_CS(chip, this) + | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) + | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE) + | BM_GPMI_CTRL0_ADDRESS_INCREMENT + | BF_GPMI_CTRL0_XFER_COUNT(naddr + 1); + pio[1] = 0; + pio[2] = 0; + desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio), + DMA_TRANS_NONE, 0); + if (!desc) + return NULL; + + transfer = get_next_transfer(this); + if (!transfer) + return NULL; + + transfer->cmdbuf[0] = cmd; + if (naddr) + memcpy(&transfer->cmdbuf[1], addr, naddr); + + sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1); + dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE); + + transfer->direction = DMA_TO_DEVICE; + + desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, DMA_MEM_TO_DEV, + MXS_DMA_CTRL_WAIT4END); + return desc; +} + +static struct dma_async_tx_descriptor *gpmi_chain_wait_ready( + struct gpmi_nand_data *this) +{ + struct dma_chan *channel = get_dma_chan(this); + u32 pio[2]; + + pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY) + | BM_GPMI_CTRL0_WORD_LENGTH + | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this) + | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) + | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA) + | BF_GPMI_CTRL0_XFER_COUNT(0); + pio[1] = 0; + + return mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE, + MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY); +} + +static struct dma_async_tx_descriptor *gpmi_chain_data_read( + struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct) +{ + struct dma_async_tx_descriptor *desc; + struct dma_chan *channel = get_dma_chan(this); + struct gpmi_transfer *transfer; + u32 pio[6] = {}; + + transfer = get_next_transfer(this); + if (!transfer) + return NULL; + + transfer->direction = DMA_FROM_DEVICE; + + *direct = prepare_data_dma(this, buf, raw_len, &transfer->sgl, + DMA_FROM_DEVICE); + + pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ) + | BM_GPMI_CTRL0_WORD_LENGTH + | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this) + | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) + | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA) + | BF_GPMI_CTRL0_XFER_COUNT(raw_len); + + if (this->bch) { + pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC + | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE) + | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE + | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY); + pio[3] = raw_len; + pio[4] = transfer->sgl.dma_address; + pio[5] = this->auxiliary_phys; + } + + desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio), + DMA_TRANS_NONE, 0); + if (!desc) + return NULL; + + if (!this->bch) + desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, + DMA_DEV_TO_MEM, + MXS_DMA_CTRL_WAIT4END); + + return desc; +} + +static struct dma_async_tx_descriptor *gpmi_chain_data_write( + struct gpmi_nand_data *this, const void *buf, int raw_len) +{ + struct dma_chan *channel = get_dma_chan(this); + struct dma_async_tx_descriptor *desc; + struct gpmi_transfer *transfer; + u32 pio[6] = {}; + + transfer = get_next_transfer(this); + if (!transfer) + return NULL; + + transfer->direction = DMA_TO_DEVICE; + + prepare_data_dma(this, buf, raw_len, &transfer->sgl, 
DMA_TO_DEVICE); + + pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE) + | BM_GPMI_CTRL0_WORD_LENGTH + | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this) + | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this) + | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA) + | BF_GPMI_CTRL0_XFER_COUNT(raw_len); + + if (this->bch) { + pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC + | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE) + | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE | + BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY); + pio[3] = raw_len; + pio[4] = transfer->sgl.dma_address; + pio[5] = this->auxiliary_phys; + } + + desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio), + DMA_TRANS_NONE, + (this->bch ? MXS_DMA_CTRL_WAIT4END : 0)); + if (!desc) + return NULL; + + if (!this->bch) + desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, + DMA_MEM_TO_DEV, + MXS_DMA_CTRL_WAIT4END); + + return desc; +} + +static int gpmi_nfc_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) +{ + const struct nand_op_instr *instr; + struct gpmi_nand_data *this = nand_get_controller_data(chip); + struct dma_async_tx_descriptor *desc = NULL; + int i, ret, buf_len = 0, nbufs = 0; + u8 cmd = 0; + void *buf_read = NULL; + const void *buf_write = NULL; + bool direct = false; + struct completion *completion; + unsigned long to; + + this->ntransfers = 0; + for (i = 0; i < GPMI_MAX_TRANSFERS; i++) + this->transfers[i].direction = DMA_NONE; + + ret = pm_runtime_get_sync(this->dev); + if (ret < 0) + return ret; + + /* + * This driver currently supports only one NAND chip. Plus, dies share + * the same configuration. So once timings have been applied on the + * controller side, they will not change anymore. When the time will + * come, the check on must_apply_timings will have to be dropped. 
+ */ + if (this->hw.must_apply_timings) { + this->hw.must_apply_timings = false; + gpmi_nfc_apply_timings(this); + } + + dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs); + + for (i = 0; i < op->ninstrs; i++) { + instr = &op->instrs[i]; + + nand_op_trace(" ", instr); + + switch (instr->type) { + case NAND_OP_WAITRDY_INSTR: + desc = gpmi_chain_wait_ready(this); + break; + case NAND_OP_CMD_INSTR: + cmd = instr->ctx.cmd.opcode; + + /* + * When this command has an address cycle chain it + * together with the address cycle + */ + if (i + 1 != op->ninstrs && + op->instrs[i + 1].type == NAND_OP_ADDR_INSTR) + continue; + + desc = gpmi_chain_command(this, cmd, NULL, 0); + + break; + case NAND_OP_ADDR_INSTR: + desc = gpmi_chain_command(this, cmd, instr->ctx.addr.addrs, + instr->ctx.addr.naddrs); + break; + case NAND_OP_DATA_OUT_INSTR: + buf_write = instr->ctx.data.buf.out; + buf_len = instr->ctx.data.len; + nbufs++; + + desc = gpmi_chain_data_write(this, buf_write, buf_len); + + break; + case NAND_OP_DATA_IN_INSTR: + if (!instr->ctx.data.len) + break; + buf_read = instr->ctx.data.buf.in; + buf_len = instr->ctx.data.len; + nbufs++; + + desc = gpmi_chain_data_read(this, buf_read, buf_len, + &direct); + break; + } + + if (!desc) { + ret = -ENXIO; + goto unmap; + } + } + + dev_dbg(this->dev, "%s setup done\n", __func__); + + if (nbufs > 1) { + dev_err(this->dev, "Multiple data instructions not supported\n"); + ret = -EINVAL; + goto unmap; + } + + if (this->bch) { + writel(this->bch_flashlayout0, + this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0); + writel(this->bch_flashlayout1, + this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1); + } + + if (this->bch && buf_read) { + writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, + this->resources.bch_regs + HW_BCH_CTRL_SET); + completion = &this->bch_done; + } else { + desc->callback = dma_irq_callback; + desc->callback_param = this; + completion = &this->dma_done; + } + + init_completion(completion); + + dmaengine_submit(desc); + dma_async_issue_pending(get_dma_chan(this)); + + to = wait_for_completion_timeout(completion, msecs_to_jiffies(1000)); + if (!to) { + dev_err(this->dev, "DMA timeout, last DMA\n"); + gpmi_dump_info(this); + ret = -ETIMEDOUT; + goto unmap; + } + + writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, + this->resources.bch_regs + HW_BCH_CTRL_CLR); + gpmi_clear_bch(this); + + ret = 0; + +unmap: + for (i = 0; i < this->ntransfers; i++) { + struct gpmi_transfer *transfer = &this->transfers[i]; + + if (transfer->direction != DMA_NONE) + dma_unmap_sg(this->dev, &transfer->sgl, 1, + transfer->direction); + } + + if (!ret && buf_read && !direct) + memcpy(buf_read, this->data_buffer_dma, + gpmi_raw_len_to_len(this, buf_len)); + + this->bch = false; + + pm_runtime_mark_last_busy(this->dev); + pm_runtime_put_autosuspend(this->dev); + + return ret; +} + static const struct nand_controller_ops gpmi_nand_controller_ops = { .attach_chip = gpmi_nand_attach_chip, .setup_data_interface = gpmi_setup_data_interface, + .exec_op = gpmi_nfc_exec_op, }; static int gpmi_nand_init(struct gpmi_nand_data *this) @@ -2800,9 +2553,6 @@ static int gpmi_nand_init(struct gpmi_nand_data *this) struct mtd_info *mtd = nand_to_mtd(chip); int ret; - /* init current chip */ - this->current_chip = -1; - /* init the MTD data structures */ mtd->name = "gpmi-nand"; mtd->dev.parent = this->dev; @@ -2810,14 +2560,8 @@ static int gpmi_nand_init(struct gpmi_nand_data *this) /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. 
*/ nand_set_controller_data(chip, this); nand_set_flash_node(chip, this->pdev->dev.of_node); - chip->legacy.select_chip = gpmi_select_chip; - chip->legacy.cmd_ctrl = gpmi_cmd_ctrl; - chip->legacy.dev_ready = gpmi_dev_ready; - chip->legacy.read_byte = gpmi_read_byte; - chip->legacy.read_buf = gpmi_read_buf; - chip->legacy.write_buf = gpmi_write_buf; - chip->badblock_pattern = &gpmi_bbt_descr; chip->legacy.block_markbad = gpmi_block_markbad; + chip->badblock_pattern = &gpmi_bbt_descr; chip->options |= NAND_NO_SUBPAGE_WRITE; /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */ @@ -2833,7 +2577,10 @@ static int gpmi_nand_init(struct gpmi_nand_data *this) if (ret) goto err_out; - chip->legacy.dummy_controller.ops = &gpmi_nand_controller_ops; + nand_controller_init(&this->base); + this->base.ops = &gpmi_nand_controller_ops; + chip->controller = &this->base; + ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1); if (ret) goto err_out; diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h index 51a070da84ed..fdc5ed7de083 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h @@ -103,6 +103,14 @@ struct gpmi_nfc_hardware_timing { u32 ctrl1n; }; +#define GPMI_MAX_TRANSFERS 8 + +struct gpmi_transfer { + u8 cmdbuf[8]; + struct scatterlist sgl; + enum dma_data_direction direction; +}; + struct gpmi_nand_data { /* Devdata */ const struct gpmi_devdata *devdata; @@ -126,23 +134,18 @@ struct gpmi_nand_data { struct boot_rom_geometry rom_geometry; /* MTD / NAND */ + struct nand_controller base; struct nand_chip nand; - /* General-use Variables */ - int current_chip; - unsigned int command_length; + struct gpmi_transfer transfers[GPMI_MAX_TRANSFERS]; + int ntransfers; - struct scatterlist cmd_sgl; - char *cmd_buffer; + bool bch; + uint32_t bch_flashlayout0; + uint32_t bch_flashlayout1; - struct scatterlist data_sgl; char *data_buffer_dma; - unsigned int page_buffer_size; - - void *payload_virt; - dma_addr_t payload_phys; - void *auxiliary_virt; dma_addr_t auxiliary_phys; diff --git a/include/linux/dma/mxs-dma.h b/include/linux/dma/mxs-dma.h index 4a33f2c8a682..069d9f5a609e 100644 --- a/include/linux/dma/mxs-dma.h +++ b/include/linux/dma/mxs-dma.h @@ -5,6 +5,7 @@ #include #define MXS_DMA_CTRL_WAIT4END BIT(31) +#define MXS_DMA_CTRL_WAIT4RDY BIT(30) /* * The mxs dmaengine can do PIO transfers. We pass a pointer to the PIO words From edd3e620ab15e66b0d5477dc3bd4732a2b3baddb Mon Sep 17 00:00:00 2001 From: Kamal Dasu Date: Tue, 21 May 2019 10:44:21 -0400 Subject: [PATCH 38/60] dt-bindings: mtd: brcmnand: Make nand-ecc-strength and nand-ecc-step-size optional nand-ecc-strength and nand-ecc-step-size can be made optional as brcmnand driver can support using raw NAND layer detected values. Signed-off-by: Kamal Dasu Reviewed-by: Rob Herring Signed-off-by: Miquel Raynal --- Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt index 0b7c3738b66c..0d952afa635d 100644 --- a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt +++ b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt @@ -101,10 +101,10 @@ Required properties: number (e.g., 0, 1, 2, etc.) - #address-cells : see partition.txt - #size-cells : see partition.txt -- nand-ecc-strength : see nand-controller.yaml -- nand-ecc-step-size : must be 512 or 1024. 
See nand-controller.yaml Optional properties: +- nand-ecc-strength : see nand-controller.yaml +- nand-ecc-step-size : must be 512 or 1024. See nand-controller.yaml - nand-on-flash-bbt : boolean, to enable the on-flash BBT for this chip-select. See nand-controller.yaml - brcm,nand-oob-sector-size : integer, to denote the spare area sector size From 78933218f5c658195124456904cc56e5b52988bd Mon Sep 17 00:00:00 2001 From: Kamal Dasu Date: Tue, 21 May 2019 10:44:22 -0400 Subject: [PATCH 39/60] mtd: rawnand: brcmnand: fallback to detected ecc-strength, ecc-step-size This change supports nand-ecc-step-size and nand-ecc-strength fields in brcmnand DT node to be optional. see: Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt If both nand-ecc-strength and nand-ecc-step-size are not specified in device tree node for NAND, raw NAND layer does detect ECC information by reading ONFI extended parameter page for parts using ONFI >= 2.1. In case of non-ONFI NAND parts there could be a nand_id table entry with ECC information. If there is valid device tree entry for nand-ecc-strength and nand-ecc-step-size fields it still shall override the detected values. Signed-off-by: Kamal Dasu Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index f368d1cb89f4..93ac1b5b8862 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -2136,6 +2136,17 @@ static int brcmnand_setup_dev(struct brcmnand_host *host) return -EINVAL; } + if (chip->ecc.mode != NAND_ECC_NONE && + (!chip->ecc.size || !chip->ecc.strength)) { + if (chip->base.eccreq.step_size && chip->base.eccreq.strength) { + /* use detected ECC parameters */ + chip->ecc.size = chip->base.eccreq.step_size; + chip->ecc.strength = chip->base.eccreq.strength; + dev_info(ctrl->dev, "Using ECC step-size %d, strength %d\n", + chip->ecc.size, chip->ecc.strength); + } + } + switch (chip->ecc.size) { case 512: if (chip->ecc.algo == NAND_ECC_HAMMING) From bded033062396e67ffbb3111084cf7ea202473d5 Mon Sep 17 00:00:00 2001 From: Jeff Kletsky Date: Wed, 22 May 2019 15:05:53 -0700 Subject: [PATCH 40/60] mtd: spinand: Define macros for page-read ops with three-byte addresses The GigaDevice GD5F1GQ4UFxxG SPI NAND utilizes three-byte addresses for its page-read ops. http://www.gigadevice.com/datasheet/gd5f1gq4xfxxg/ Signed-off-by: Jeff Kletsky Reviewed-by: Frieder Schrempf Signed-off-by: Miquel Raynal --- include/linux/mtd/spinand.h | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 507f7e289bd1..8aa39ac41e8e 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -68,30 +68,60 @@ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 1)) +#define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(fast, addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 
0x0b : 0x03, 1), \ + SPI_MEM_OP_ADDR(3, addr, 1), \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 1)) + #define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \ SPI_MEM_OP_ADDR(2, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 2)) +#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \ + SPI_MEM_OP_ADDR(3, addr, 1), \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 2)) + #define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \ SPI_MEM_OP_ADDR(2, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 4)) +#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \ + SPI_MEM_OP_ADDR(3, addr, 1), \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 4)) + #define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \ SPI_MEM_OP_ADDR(2, addr, 2), \ SPI_MEM_OP_DUMMY(ndummy, 2), \ SPI_MEM_OP_DATA_IN(len, buf, 2)) +#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP_3A(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \ + SPI_MEM_OP_ADDR(3, addr, 2), \ + SPI_MEM_OP_DUMMY(ndummy, 2), \ + SPI_MEM_OP_DATA_IN(len, buf, 2)) + #define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \ SPI_MEM_OP_ADDR(2, addr, 4), \ SPI_MEM_OP_DUMMY(ndummy, 4), \ SPI_MEM_OP_DATA_IN(len, buf, 4)) +#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP_3A(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \ + SPI_MEM_OP_ADDR(3, addr, 4), \ + SPI_MEM_OP_DUMMY(ndummy, 4), \ + SPI_MEM_OP_DATA_IN(len, buf, 4)) + #define SPINAND_PROG_EXEC_OP(addr) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \ SPI_MEM_OP_ADDR(3, addr, 1), \ From 878844908e563a2f02b977bacd221c288e681c47 Mon Sep 17 00:00:00 2001 From: Jeff Kletsky Date: Wed, 22 May 2019 15:05:54 -0700 Subject: [PATCH 41/60] mtd: spinand: Add support for two-byte device IDs The GigaDevice GD5F1GQ4UFxxG SPI NAND utilizes two-byte device IDs. 
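As an illustration of what the wider ID type allows (a minimal sketch, not part of this patch: the function name is made up, while SPINAND_MFR_GIGADEVICE, gigadevice_spinand_table and spinand_match_and_init() are the real symbols used later in this series), a vendor detect hook can now fold two device ID bytes into the value it hands to the core:

  /* Hypothetical detect hook for a raw ID laid out as [MID][DID1][DID2] */
  static int example_detect_two_byte_id(struct spinand_device *spinand)
  {
          u8 *id = spinand->id.data;
          u16 did;
          int ret;

          if (id[0] != SPINAND_MFR_GIGADEVICE)
                  return 0;       /* not this vendor, keep probing */

          did = (id[1] << 8) | id[2];     /* two device ID bytes */

          ret = spinand_match_and_init(spinand, gigadevice_spinand_table,
                                       ARRAY_SIZE(gigadevice_spinand_table),
                                       did);
          if (ret)
                  return ret;

          return 1;
  }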
http://www.gigadevice.com/datasheet/gd5f1gq4xfxxg/ Signed-off-by: Jeff Kletsky Reviewed-by: Frieder Schrempf Signed-off-by: Miquel Raynal --- drivers/mtd/nand/spi/core.c | 2 +- include/linux/mtd/spinand.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 4c15bb58c623..556bfdb34455 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -845,7 +845,7 @@ spinand_select_op_variant(struct spinand_device *spinand, */ int spinand_match_and_init(struct spinand_device *spinand, const struct spinand_info *table, - unsigned int table_size, u8 devid) + unsigned int table_size, u16 devid) { struct nand_device *nand = spinand_to_nand(spinand); unsigned int i; diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 8aa39ac41e8e..fbc0423bb4ae 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -290,7 +290,7 @@ struct spinand_ecc_info { */ struct spinand_info { const char *model; - u8 devid; + u16 devid; u32 flags; struct nand_memory_organization memorg; struct nand_ecc_req eccreq; @@ -452,7 +452,7 @@ static inline void spinand_set_of_node(struct spinand_device *spinand, int spinand_match_and_init(struct spinand_device *dev, const struct spinand_info *table, - unsigned int table_size, u8 devid); + unsigned int table_size, u16 devid); int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val); int spinand_select_target(struct spinand_device *spinand, unsigned int target); From cfd93d7c908e71d99996be93d2b031ad3fddc292 Mon Sep 17 00:00:00 2001 From: Jeff Kletsky Date: Wed, 22 May 2019 15:05:55 -0700 Subject: [PATCH 42/60] mtd: spinand: Add support for GigaDevice GD5F1GQ4UFxxG The GigaDevice GD5F1GQ4UFxxG SPI NAND is in current production devices and, while it has the same logical layout as the E-series devices, it differs in the SPI interfacing in significant ways. 
This support is contingent on previous commits to: * Add support for two-byte device IDs * Define macros for page-read ops with three-byte addresses http://www.gigadevice.com/datasheet/gd5f1gq4xfxxg/ Signed-off-by: Jeff Kletsky Reviewed-by: Frieder Schrempf Signed-off-by: Miquel Raynal --- drivers/mtd/nand/spi/gigadevice.c | 79 +++++++++++++++++++++++++------ 1 file changed, 64 insertions(+), 15 deletions(-) diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c index e5586390026a..b0c26eb5e8b6 100644 --- a/drivers/mtd/nand/spi/gigadevice.c +++ b/drivers/mtd/nand/spi/gigadevice.c @@ -9,11 +9,17 @@ #include #define SPINAND_MFR_GIGADEVICE 0xC8 + #define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS (1 << 4) #define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS (3 << 4) #define GD5FXGQ4UEXXG_REG_STATUS2 0xf0 +#define GD5FXGQ4UXFXXG_STATUS_ECC_MASK (7 << 4) +#define GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS (0 << 4) +#define GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS (1 << 4) +#define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4) + static SPINAND_OP_VARIANTS(read_cache_variants, SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), @@ -22,6 +28,14 @@ static SPINAND_OP_VARIANTS(read_cache_variants, SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); +static SPINAND_OP_VARIANTS(read_cache_variants_f, + SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0)); + static SPINAND_OP_VARIANTS(write_cache_variants, SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), SPINAND_PROG_LOAD(true, 0, NULL, 0)); @@ -59,6 +73,11 @@ static int gd5fxgq4xa_ooblayout_free(struct mtd_info *mtd, int section, return 0; } +static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = { + .ecc = gd5fxgq4xa_ooblayout_ecc, + .free = gd5fxgq4xa_ooblayout_free, +}; + static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand, u8 status) { @@ -83,7 +102,7 @@ static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand, return -EINVAL; } -static int gd5fxgq4uexxg_ooblayout_ecc(struct mtd_info *mtd, int section, +static int gd5fxgq4_variant2_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) { if (section) @@ -95,7 +114,7 @@ static int gd5fxgq4uexxg_ooblayout_ecc(struct mtd_info *mtd, int section, return 0; } -static int gd5fxgq4uexxg_ooblayout_free(struct mtd_info *mtd, int section, +static int gd5fxgq4_variant2_ooblayout_free(struct mtd_info *mtd, int section, struct mtd_oob_region *region) { if (section) @@ -108,6 +127,11 @@ static int gd5fxgq4uexxg_ooblayout_free(struct mtd_info *mtd, int section, return 0; } +static const struct mtd_ooblayout_ops gd5fxgq4_variant2_ooblayout = { + .ecc = gd5fxgq4_variant2_ooblayout_ecc, + .free = gd5fxgq4_variant2_ooblayout_free, +}; + static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand, u8 status) { @@ -150,15 +174,25 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand, return -EINVAL; } -static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = { - .ecc = gd5fxgq4xa_ooblayout_ecc, - .free = gd5fxgq4xa_ooblayout_free, -}; +static int gd5fxgq4ufxxg_ecc_get_status(struct spinand_device *spinand, + u8 status) +{ + switch (status & 
GD5FXGQ4UXFXXG_STATUS_ECC_MASK) { + case GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS: + return 0; -static const struct mtd_ooblayout_ops gd5fxgq4uexxg_ooblayout = { - .ecc = gd5fxgq4uexxg_ooblayout_ecc, - .free = gd5fxgq4uexxg_ooblayout_free, -}; + case GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS: + return 3; + + case GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR: + return -EBADMSG; + + default: /* (2 << 4) through (6 << 4) are 4-8 corrected errors */ + return ((status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) >> 4) + 2; + } + + return -EINVAL; +} static const struct spinand_info gigadevice_spinand_table[] = { SPINAND_INFO("GD5F1GQ4xA", 0xF1, @@ -195,25 +229,40 @@ static const struct spinand_info gigadevice_spinand_table[] = { &write_cache_variants, &update_cache_variants), 0, - SPINAND_ECCINFO(&gd5fxgq4uexxg_ooblayout, + SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout, gd5fxgq4uexxg_ecc_get_status)), + SPINAND_INFO("GD5F1GQ4UFxxG", 0xb148, + NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout, + gd5fxgq4ufxxg_ecc_get_status)), }; static int gigadevice_spinand_detect(struct spinand_device *spinand) { u8 *id = spinand->id.data; + u16 did; int ret; /* - * For GD NANDs, There is an address byte needed to shift in before IDs - * are read out, so the first byte in raw_id is dummy. + * Earlier GDF5-series devices (A,E) return [0][MID][DID] + * Later (F) devices return [MID][DID1][DID2] */ - if (id[1] != SPINAND_MFR_GIGADEVICE) + + if (id[0] == SPINAND_MFR_GIGADEVICE) + did = (id[1] << 8) + id[2]; + else if (id[0] == 0 && id[1] == SPINAND_MFR_GIGADEVICE) + did = id[2]; + else return 0; ret = spinand_match_and_init(spinand, gigadevice_spinand_table, ARRAY_SIZE(gigadevice_spinand_table), - id[2]); + did); if (ret) return ret; From db01077c5fffc73fc190d6ce3d68ae083044e4a2 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 23 May 2019 14:16:06 -0500 Subject: [PATCH 43/60] mtd: onenand: Avoid fall-through warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit NOTICE THAT: "...we don't know whether we need fallthroughs or breaks here and this is just a change to avoid having new warnings when switching to -Wimplicit-fallthrough but this change might be entirely wrong."[1] See the original thread of discussion here: https://lore.kernel.org/patchwork/patch/1036251/ So, in preparation to enabling -Wimplicit-fallthrough, this patch silences the following warnings: drivers/mtd/nand/onenand/onenand_base.c: In function ‘onenand_check_features’: drivers/mtd/nand/onenand/onenand_base.c:3264:6: warning: this statement may fall through [-Wimplicit-fallthrough=] if (ONENAND_IS_DDP(this)) ^ drivers/mtd/nand/onenand/onenand_base.c:3284:2: note: here case ONENAND_DEVICE_DENSITY_2Gb: ^~~~ drivers/mtd/nand/onenand/onenand_base.c:3288:17: warning: this statement may fall through [-Wimplicit-fallthrough=] this->options |= ONENAND_HAS_UNLOCK_ALL; drivers/mtd/nand/onenand/onenand_base.c:3290:2: note: here case ONENAND_DEVICE_DENSITY_1Gb: ^~~~ Warning level 3 was used: -Wimplicit-fallthrough=3 Also, notice that this patch doesn't change any functionality. See the most recent thread of discussion here: https://lore.kernel.org/patchwork/patch/1077395/ This patch is part of the ongoing efforts to enable -Wimplicit-fallthrough. 
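For readers unfamiliar with the option, here is a minimal, self-contained sketch (all names are hypothetical, nothing here is taken from onenand_base.c) of the pattern -Wimplicit-fallthrough complains about and the explicit break that silences it when falling through was never intended:

  #define EXAMPLE_HAS_2PLANE      (1 << 0)
  #define EXAMPLE_HAS_UNLOCK_ALL  (1 << 1)

  struct example_chip {
          unsigned int density;
          unsigned int options;
  };

  static void example_check_features(struct example_chip *this)
  {
          switch (this->density) {
          case 2:
                  this->options |= EXAMPLE_HAS_2PLANE;
                  this->options |= EXAMPLE_HAS_UNLOCK_ALL;
                  /* without the break below (or a "fall through" comment),
                   * the compiler reports an implicit fall-through into the
                   * next case */
                  break;
          case 1:
                  this->options |= EXAMPLE_HAS_UNLOCK_ALL;
                  break;
          }
  }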
[1] https://lore.kernel.org/lkml/20190509085318.34a9d4be@xps13/ Cc: Miquel Raynal Suggested-by: Boris Brezillon Suggested-by: Kees Cook Signed-off-by: Gustavo A. R. Silva Reviewed-by: Kees Cook Signed-off-by: Miquel Raynal --- drivers/mtd/nand/onenand/onenand_base.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c index 492c0059673d..ba46d0cf60a1 100644 --- a/drivers/mtd/nand/onenand/onenand_base.c +++ b/drivers/mtd/nand/onenand/onenand_base.c @@ -3282,12 +3282,15 @@ static void onenand_check_features(struct mtd_info *mtd) if ((this->version_id & 0xf) == 0xe) this->options |= ONENAND_HAS_NOP_1; } + this->options |= ONENAND_HAS_UNLOCK_ALL; + break; case ONENAND_DEVICE_DENSITY_2Gb: /* 2Gb DDP does not have 2 plane */ if (!ONENAND_IS_DDP(this)) this->options |= ONENAND_HAS_2PLANE; this->options |= ONENAND_HAS_UNLOCK_ALL; + break; case ONENAND_DEVICE_DENSITY_1Gb: /* A-Die has all block unlock */ From 33535b85478d253a234a2779f28992a4d1766629 Mon Sep 17 00:00:00 2001 From: Mason Yang Date: Mon, 3 Jun 2019 10:42:04 +0800 Subject: [PATCH 44/60] mtd: rawnand: Add Macronix NAND read retry support Add support for Macronix NAND read retry. Macronix NANDs support specific read operation for data recovery, which can be enabled with a SET_FEATURE. Driver checks byte 167 of Vendor Blocks in ONFI parameter page table to see if this high-reliability function is supported. Signed-off-by: Mason Yang Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/nand_macronix.c | 45 ++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c index fad57c378dd2..58511aeb0c9a 100644 --- a/drivers/mtd/nand/raw/nand_macronix.c +++ b/drivers/mtd/nand/raw/nand_macronix.c @@ -8,6 +8,50 @@ #include "internals.h" +#define MACRONIX_READ_RETRY_BIT BIT(0) +#define MACRONIX_NUM_READ_RETRY_MODES 6 + +struct nand_onfi_vendor_macronix { + u8 reserved; + u8 reliability_func; +} __packed; + +static int macronix_nand_setup_read_retry(struct nand_chip *chip, int mode) +{ + u8 feature[ONFI_SUBFEATURE_PARAM_LEN]; + + if (!chip->parameters.supports_set_get_features || + !test_bit(ONFI_FEATURE_ADDR_READ_RETRY, + chip->parameters.set_feature_list)) + return -ENOTSUPP; + + feature[0] = mode; + return nand_set_features(chip, ONFI_FEATURE_ADDR_READ_RETRY, feature); +} + +static void macronix_nand_onfi_init(struct nand_chip *chip) +{ + struct nand_parameters *p = &chip->parameters; + struct nand_onfi_vendor_macronix *mxic; + + if (!p->onfi) + return; + + mxic = (struct nand_onfi_vendor_macronix *)p->onfi->vendor; + if ((mxic->reliability_func & MACRONIX_READ_RETRY_BIT) == 0) + return; + + chip->read_retries = MACRONIX_NUM_READ_RETRY_MODES; + chip->setup_read_retry = macronix_nand_setup_read_retry; + + if (p->supports_set_get_features) { + bitmap_set(p->set_feature_list, + ONFI_FEATURE_ADDR_READ_RETRY, 1); + bitmap_set(p->get_feature_list, + ONFI_FEATURE_ADDR_READ_RETRY, 1); + } +} + /* * Macronix AC series does not support using SET/GET_FEATURES to change * the timings unlike what is declared in the parameter page. 
Unflag @@ -56,6 +100,7 @@ static int macronix_nand_init(struct nand_chip *chip) chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE; macronix_nand_fix_broken_get_timings(chip); + macronix_nand_onfi_init(chip); return 0; } From 9f897bfdd89f5f08a12fa263a7f57fbf8ad9292f Mon Sep 17 00:00:00 2001 From: Kamal Dasu Date: Thu, 16 May 2019 12:41:46 -0400 Subject: [PATCH 45/60] mtd: Add flag to indicate panic_write Added a flag to indicate a panic_write so that low level drivers can use it to take required action where applicable, to ensure oops data gets written to assigned mtd device. Signed-off-by: Kamal Dasu Signed-off-by: Miquel Raynal --- drivers/mtd/mtdcore.c | 3 +++ include/linux/mtd/mtd.h | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 453242d6cf56..408615f29e57 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -1124,6 +1124,9 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, return -EROFS; if (!len) return 0; + if (!mtd->oops_panic_write) + mtd->oops_panic_write = true; + return mtd->_panic_write(mtd, to, len, retlen, buf); } EXPORT_SYMBOL_GPL(mtd_panic_write); diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 936a3fdb48b5..4ca8c1c845fb 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -316,6 +316,12 @@ struct mtd_info { int (*_get_device) (struct mtd_info *mtd); void (*_put_device) (struct mtd_info *mtd); + /* + * flag indicates a panic write, low level drivers can take appropriate + * action if required to ensure writes go through + */ + bool oops_panic_write; + struct notifier_block reboot_notifier; /* default mode before reboot */ /* ECC status information */ From c1ac2dc34b51a9d2191067bbf2aebc8e8ddada70 Mon Sep 17 00:00:00 2001 From: Kamal Dasu Date: Thu, 16 May 2019 12:41:47 -0400 Subject: [PATCH 46/60] mtd: rawnand: brcmnand: When oops in progress use pio and interrupt polling If mtd_oops is in progress, switch to polling during NAND command completion instead of relying on DMA/interrupts so that the mtd_oops buffer can be completely written in the assigned NAND partition. 
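In outline, the wait path added below boils down to this sketch (condensed from the diff; the wrapper name is illustrative, the fields and helpers it calls are the driver's own): when mtd->oops_panic_write is set, the code stops sleeping on the completion and polls the controller-ready bits instead.

  /* Condensed sketch of the new wait logic; returns true on failure */
  static bool example_wait_for_completion(struct nand_chip *chip)
  {
          struct brcmnand_host *host = nand_get_controller_data(chip);
          struct brcmnand_controller *ctrl = host->ctrl;
          struct mtd_info *mtd = nand_to_mtd(chip);

          if (mtd->oops_panic_write) {
                  /* panic context: no interrupts, no scheduling */
                  disable_ctrl_irqs(ctrl);
                  return bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
                                                  NAND_CTRL_RDY, 0) < 0;
          }

          /* normal path: sleep until the completion interrupt fires */
          return wait_for_completion_timeout(&ctrl->done,
                          msecs_to_jiffies(NAND_POLL_STATUS_TIMEOUT_MS)) <= 0;
  }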
Signed-off-by: Kamal Dasu Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 48 ++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 93ac1b5b8862..e7b955a79996 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -151,6 +151,7 @@ struct brcmnand_controller { u32 nand_cs_nand_xor; u32 corr_stat_threshold; u32 flash_dma_mode; + bool pio_poll_mode; }; struct brcmnand_cfg { @@ -815,6 +816,20 @@ static inline bool has_flash_dma(struct brcmnand_controller *ctrl) return ctrl->flash_dma_base; } +static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl) +{ + if (ctrl->pio_poll_mode) + return; + + if (has_flash_dma(ctrl)) { + ctrl->flash_dma_base = 0; + disable_irq(ctrl->dma_irq); + } + + disable_irq(ctrl->irq); + ctrl->pio_poll_mode = true; +} + static inline bool flash_dma_buf_ok(const void *buf) { return buf && !is_vmalloc_addr(buf) && @@ -1229,15 +1244,42 @@ static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat, /* intentionally left blank */ } +static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip) +{ + struct brcmnand_host *host = nand_get_controller_data(chip); + struct brcmnand_controller *ctrl = host->ctrl; + struct mtd_info *mtd = nand_to_mtd(chip); + bool err = false; + int sts; + + if (mtd->oops_panic_write) { + /* switch to interrupt polling and PIO mode */ + disable_ctrl_irqs(ctrl); + sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, + NAND_CTRL_RDY, 0); + err = (sts < 0) ? true : false; + } else { + unsigned long timeo = msecs_to_jiffies( + NAND_POLL_STATUS_TIMEOUT_MS); + /* wait for completion interrupt */ + sts = wait_for_completion_timeout(&ctrl->done, timeo); + err = (sts <= 0) ? true : false; + } + + return err; +} + static int brcmnand_waitfunc(struct nand_chip *chip) { struct brcmnand_host *host = nand_get_controller_data(chip); struct brcmnand_controller *ctrl = host->ctrl; - unsigned long timeo = msecs_to_jiffies(100); + bool err = false; dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending); - if (ctrl->cmd_pending && - wait_for_completion_timeout(&ctrl->done, timeo) <= 0) { + if (ctrl->cmd_pending) + err = brcmstb_nand_wait_for_completion(chip); + + if (err) { u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START) >> brcmnand_cmd_shift(ctrl); From 3c7c1e4594efd57b98ae6f7298f40cff4f4fb47b Mon Sep 17 00:00:00 2001 From: Kamal Dasu Date: Tue, 4 Jun 2019 10:36:29 -0400 Subject: [PATCH 47/60] mtd: rawnand: brcmnand: Refactored code to introduce helper functions Refactored NAND ECC and CMD address configuration code to use helper functions. 
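The shape of the refactor is visible from one of the new helpers (shown here as a sketch, condensed from the diff; the dummy read-backs after each write are kept from the original open-coded sequences):

  /* Program chip-select plus upper and lower command address bits */
  static void brcmnand_set_cmd_addr(struct mtd_info *mtd, u64 addr)
  {
          struct nand_chip *chip = mtd_to_nand(mtd);
          struct brcmnand_host *host = nand_get_controller_data(chip);
          struct brcmnand_controller *ctrl = host->ctrl;

          /* chip-select in bits [31:16] of the extended address register */
          brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
                             (host->cs << 16) | ((addr >> 32) & 0xffff));
          (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
          brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, lower_32_bits(addr));
          (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
  }

Each call site in the read, write and cmdfunc paths then collapses to a single brcmnand_set_cmd_addr() call.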
Signed-off-by: Kamal Dasu Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 100 ++++++++++++++--------- 1 file changed, 62 insertions(+), 38 deletions(-) diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index e7b955a79996..357ef269337b 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -581,6 +581,54 @@ static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl, __raw_writel(val, ctrl->nand_fc + word * 4); } +static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl) +{ + + /* Clear error addresses */ + brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0); + brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0); + brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0); + brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0); +} + +static u64 brcmnand_get_uncorrecc_addr(struct brcmnand_controller *ctrl) +{ + u64 err_addr; + + err_addr = brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_ADDR); + err_addr |= ((u64)(brcmnand_read_reg(ctrl, + BRCMNAND_UNCORR_EXT_ADDR) + & 0xffff) << 32); + + return err_addr; +} + +static u64 brcmnand_get_correcc_addr(struct brcmnand_controller *ctrl) +{ + u64 err_addr; + + err_addr = brcmnand_read_reg(ctrl, BRCMNAND_CORR_ADDR); + err_addr |= ((u64)(brcmnand_read_reg(ctrl, + BRCMNAND_CORR_EXT_ADDR) + & 0xffff) << 32); + + return err_addr; +} + +static void brcmnand_set_cmd_addr(struct mtd_info *mtd, u64 addr) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct brcmnand_host *host = nand_get_controller_data(chip); + struct brcmnand_controller *ctrl = host->ctrl; + + brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS, + (host->cs << 16) | ((addr >> 32) & 0xffff)); + (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS); + brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, + lower_32_bits(addr)); + (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS); +} + static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs, enum brcmnand_cs_reg reg) { @@ -1220,9 +1268,12 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd) { struct brcmnand_controller *ctrl = host->ctrl; int ret; + u64 cmd_addr; + + cmd_addr = brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS); + + dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr); - dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd, - brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS)); BUG_ON(ctrl->cmd_pending != 0); ctrl->cmd_pending = cmd; @@ -1408,12 +1459,7 @@ static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command, if (!native_cmd) return; - brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS, - (host->cs << 16) | ((addr >> 32) & 0xffff)); - (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS); - brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, lower_32_bits(addr)); - (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS); - + brcmnand_set_cmd_addr(mtd, addr); brcmnand_send_cmd(host, native_cmd); brcmnand_waitfunc(chip); @@ -1631,20 +1677,10 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, struct brcmnand_controller *ctrl = host->ctrl; int i, j, ret = 0; - /* Clear error addresses */ - brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0); - brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0); - brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0); - brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0); - - brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS, - (host->cs << 16) | ((addr >> 32) & 0xffff)); - 
(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS); + brcmnand_clear_ecc_addr(ctrl); for (i = 0; i < trans; i++, addr += FC_BYTES) { - brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, - lower_32_bits(addr)); - (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS); + brcmnand_set_cmd_addr(mtd, addr); /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */ brcmnand_send_cmd(host, CMD_PAGE_READ); brcmnand_waitfunc(chip); @@ -1664,21 +1700,15 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, host->hwcfg.sector_size_1k); if (!ret) { - *err_addr = brcmnand_read_reg(ctrl, - BRCMNAND_UNCORR_ADDR) | - ((u64)(brcmnand_read_reg(ctrl, - BRCMNAND_UNCORR_EXT_ADDR) - & 0xffff) << 32); + *err_addr = brcmnand_get_uncorrecc_addr(ctrl); + if (*err_addr) ret = -EBADMSG; } if (!ret) { - *err_addr = brcmnand_read_reg(ctrl, - BRCMNAND_CORR_ADDR) | - ((u64)(brcmnand_read_reg(ctrl, - BRCMNAND_CORR_EXT_ADDR) - & 0xffff) << 32); + *err_addr = brcmnand_get_correcc_addr(ctrl); + if (*err_addr) ret = -EUCLEAN; } @@ -1745,7 +1775,7 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf); try_dmaread: - brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0); + brcmnand_clear_ecc_addr(ctrl); if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) { err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES, @@ -1892,15 +1922,9 @@ static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip, goto out; } - brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS, - (host->cs << 16) | ((addr >> 32) & 0xffff)); - (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS); - for (i = 0; i < trans; i++, addr += FC_BYTES) { /* full address MUST be set before populating FC */ - brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, - lower_32_bits(addr)); - (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS); + brcmnand_set_cmd_addr(mtd, addr); if (buf) { brcmnand_soc_data_bus_prepare(ctrl->soc, false); From 0c06da5788be2304ea23d348fd59cd43df8db7f8 Mon Sep 17 00:00:00 2001 From: Kamal Dasu Date: Tue, 4 Jun 2019 10:36:30 -0400 Subject: [PATCH 48/60] mtd: rawnand: brcmnand: Add support for v7.3 controller This change adds support for brcm NAND v7.3 controller. This controller uses a newer version of flash_dma engine and change mostly implements these differences. 
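The core of the change is an indirection table: the flash_dma accessors look up a per-revision offset array instead of relying on fixed offsets. A condensed sketch (the enum and tables are abbreviated to three registers and the example_* names are illustrative; the complete versions are in the diff):

  enum example_dma_reg {
          EXAMPLE_DMA_REVISION = 0,
          EXAMPLE_DMA_CTRL,
          EXAMPLE_DMA_MODE,
  };

  static const u16 example_dma_regs_v1[] = {
          [EXAMPLE_DMA_REVISION]  = 0x00,
          [EXAMPLE_DMA_CTRL]      = 0x0c,
          [EXAMPLE_DMA_MODE]      = 0x10,
  };

  static const u16 example_dma_regs_v4[] = {
          [EXAMPLE_DMA_REVISION]  = 0x00,
          [EXAMPLE_DMA_CTRL]      = 0x10,
          [EXAMPLE_DMA_MODE]      = 0x14,
  };

  /* ctrl->flash_dma_offsets points at the v1 or v4 table, chosen once
   * from ctrl->nand_version (>= 0x0703 selects the v4 layout) */
  static inline u32 example_dma_readl(struct brcmnand_controller *ctrl,
                                      enum example_dma_reg reg)
  {
          return brcmnand_readl(ctrl->flash_dma_base +
                                ctrl->flash_dma_offsets[reg]);
  }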
Signed-off-by: Kamal Dasu Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 102 ++++++++++++++++++----- 1 file changed, 80 insertions(+), 22 deletions(-) diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 357ef269337b..33310b8a6eb8 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -84,6 +84,12 @@ struct brcm_nand_dma_desc { #define FLASH_DMA_ECC_ERROR (1 << 8) #define FLASH_DMA_CORR_ERROR (1 << 9) +/* Bitfields for DMA_MODE */ +#define FLASH_DMA_MODE_STOP_ON_ERROR BIT(1) /* stop in Uncorr ECC error */ +#define FLASH_DMA_MODE_MODE BIT(0) /* link list */ +#define FLASH_DMA_MODE_MASK (FLASH_DMA_MODE_STOP_ON_ERROR | \ + FLASH_DMA_MODE_MODE) + /* 512B flash cache in the NAND controller HW */ #define FC_SHIFT 9U #define FC_BYTES 512U @@ -96,6 +102,51 @@ struct brcm_nand_dma_desc { #define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY) #define NAND_POLL_STATUS_TIMEOUT_MS 100 +/* flash_dma registers */ +enum flash_dma_reg { + FLASH_DMA_REVISION = 0, + FLASH_DMA_FIRST_DESC, + FLASH_DMA_FIRST_DESC_EXT, + FLASH_DMA_CTRL, + FLASH_DMA_MODE, + FLASH_DMA_STATUS, + FLASH_DMA_INTERRUPT_DESC, + FLASH_DMA_INTERRUPT_DESC_EXT, + FLASH_DMA_ERROR_STATUS, + FLASH_DMA_CURRENT_DESC, + FLASH_DMA_CURRENT_DESC_EXT, +}; + +/* flash_dma registers v1*/ +static const u16 flash_dma_regs_v1[] = { + [FLASH_DMA_REVISION] = 0x00, + [FLASH_DMA_FIRST_DESC] = 0x04, + [FLASH_DMA_FIRST_DESC_EXT] = 0x08, + [FLASH_DMA_CTRL] = 0x0c, + [FLASH_DMA_MODE] = 0x10, + [FLASH_DMA_STATUS] = 0x14, + [FLASH_DMA_INTERRUPT_DESC] = 0x18, + [FLASH_DMA_INTERRUPT_DESC_EXT] = 0x1c, + [FLASH_DMA_ERROR_STATUS] = 0x20, + [FLASH_DMA_CURRENT_DESC] = 0x24, + [FLASH_DMA_CURRENT_DESC_EXT] = 0x28, +}; + +/* flash_dma registers v4 */ +static const u16 flash_dma_regs_v4[] = { + [FLASH_DMA_REVISION] = 0x00, + [FLASH_DMA_FIRST_DESC] = 0x08, + [FLASH_DMA_FIRST_DESC_EXT] = 0x0c, + [FLASH_DMA_CTRL] = 0x10, + [FLASH_DMA_MODE] = 0x14, + [FLASH_DMA_STATUS] = 0x18, + [FLASH_DMA_INTERRUPT_DESC] = 0x20, + [FLASH_DMA_INTERRUPT_DESC_EXT] = 0x24, + [FLASH_DMA_ERROR_STATUS] = 0x28, + [FLASH_DMA_CURRENT_DESC] = 0x30, + [FLASH_DMA_CURRENT_DESC_EXT] = 0x34, +}; + /* Controller feature flags */ enum { BRCMNAND_HAS_1K_SECTORS = BIT(0), @@ -128,6 +179,8 @@ struct brcmnand_controller { /* List of NAND hosts (one for each chip-select) */ struct list_head host_list; + /* flash_dma reg */ + const u16 *flash_dma_offsets; struct brcm_nand_dma_desc *dma_desc; dma_addr_t dma_pa; @@ -463,7 +516,7 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) /* Register offsets */ if (ctrl->nand_version >= 0x0702) ctrl->reg_offsets = brcmnand_regs_v72; - else if (ctrl->nand_version >= 0x0701) + else if (ctrl->nand_version == 0x0701) ctrl->reg_offsets = brcmnand_regs_v71; else if (ctrl->nand_version >= 0x0600) ctrl->reg_offsets = brcmnand_regs_v60; @@ -508,7 +561,7 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) } /* Maximum spare area sector size (per 512B) */ - if (ctrl->nand_version >= 0x0702) + if (ctrl->nand_version == 0x0702) ctrl->max_oob = 128; else if (ctrl->nand_version >= 0x0600) ctrl->max_oob = 64; @@ -539,6 +592,15 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) return 0; } +static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl) +{ + /* flash_dma register offsets */ + if (ctrl->nand_version >= 0x0703) + ctrl->flash_dma_offsets = flash_dma_regs_v4; + else + 
ctrl->flash_dma_offsets = flash_dma_regs_v1; +} + static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl, enum brcmnand_reg reg) { @@ -661,7 +723,7 @@ static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val) enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD; int cs = host->cs; - if (ctrl->nand_version >= 0x0702) + if (ctrl->nand_version == 0x0702) bits = 7; else if (ctrl->nand_version >= 0x0600) bits = 6; @@ -715,7 +777,7 @@ enum { static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl) { - if (ctrl->nand_version >= 0x0702) + if (ctrl->nand_version == 0x0702) return GENMASK(7, 0); else if (ctrl->nand_version >= 0x0600) return GENMASK(6, 0); @@ -845,20 +907,6 @@ static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en) * Flash DMA ***********************************************************************/ -enum flash_dma_reg { - FLASH_DMA_REVISION = 0x00, - FLASH_DMA_FIRST_DESC = 0x04, - FLASH_DMA_FIRST_DESC_EXT = 0x08, - FLASH_DMA_CTRL = 0x0c, - FLASH_DMA_MODE = 0x10, - FLASH_DMA_STATUS = 0x14, - FLASH_DMA_INTERRUPT_DESC = 0x18, - FLASH_DMA_INTERRUPT_DESC_EXT = 0x1c, - FLASH_DMA_ERROR_STATUS = 0x20, - FLASH_DMA_CURRENT_DESC = 0x24, - FLASH_DMA_CURRENT_DESC_EXT = 0x28, -}; - static inline bool has_flash_dma(struct brcmnand_controller *ctrl) { return ctrl->flash_dma_base; @@ -884,14 +932,19 @@ static inline bool flash_dma_buf_ok(const void *buf) likely(IS_ALIGNED((uintptr_t)buf, 4)); } -static inline void flash_dma_writel(struct brcmnand_controller *ctrl, u8 offs, - u32 val) +static inline void flash_dma_writel(struct brcmnand_controller *ctrl, + enum flash_dma_reg dma_reg, u32 val) { + u16 offs = ctrl->flash_dma_offsets[dma_reg]; + brcmnand_writel(val, ctrl->flash_dma_base + offs); } -static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl, u8 offs) +static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl, + enum flash_dma_reg dma_reg) { + u16 offs = ctrl->flash_dma_offsets[dma_reg]; + return brcmnand_readl(ctrl->flash_dma_base + offs); } @@ -2472,6 +2525,7 @@ static const struct of_device_id brcmnand_of_match[] = { { .compatible = "brcm,brcmnand-v7.0" }, { .compatible = "brcm,brcmnand-v7.1" }, { .compatible = "brcm,brcmnand-v7.2" }, + { .compatible = "brcm,brcmnand-v7.3" }, {}, }; MODULE_DEVICE_TABLE(of, brcmnand_of_match); @@ -2558,7 +2612,11 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc) goto err; } - flash_dma_writel(ctrl, FLASH_DMA_MODE, 1); /* linked-list */ + /* initialize the dma version */ + brcmnand_flash_dma_revision_init(ctrl); + + /* linked-list and stop on error */ + flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK); flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0); /* Allocate descriptor(s) */ From 3054354ffe096f748410107e6e2c00165e7a17e5 Mon Sep 17 00:00:00 2001 From: Kamal Dasu Date: Tue, 4 Jun 2019 10:36:31 -0400 Subject: [PATCH 49/60] dt-bindings: mtd: brcmnand: Add brcmnand, brcmnand-v7.3 support Added brcm,brcmnand-v7.3 as possible compatible string to support brcmnand controller v7.3. 
Signed-off-by: Kamal Dasu Reviewed-by: Rob Herring Signed-off-by: Miquel Raynal --- Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt index 0d952afa635d..82156dc8f304 100644 --- a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt +++ b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt @@ -28,6 +28,7 @@ Required properties: brcm,brcmnand-v7.0 brcm,brcmnand-v7.1 brcm,brcmnand-v7.2 + brcm,brcmnand-v7.3 brcm,brcmnand - reg : the register start and length for NAND register region. (optional) Flash DMA register range (if present) From 6bd2a42aa67a8aa20f62f1cabe6930ef1eaf389b Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 4 Jun 2019 11:58:59 +0100 Subject: [PATCH 50/60] mtd: rawnand: gpmi: remove double assignment to block_size The variable block_size is being assigned to itself and to geo->ecc_chunk_size. Clean up the double assignment by removing the assignment to itself. Addresses-Coverity: ("Evaluation order violation") Signed-off-by: Colin Ian King Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 5db84178edff..334fe3130285 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -1428,7 +1428,7 @@ static void gpmi_bch_layout_std(struct gpmi_nand_data *this) struct bch_geometry *geo = &this->bch_geometry; unsigned int ecc_strength = geo->ecc_strength >> 1; unsigned int gf_len = geo->gf_len; - unsigned int block_size = block_size = geo->ecc_chunk_size; + unsigned int block_size = geo->ecc_chunk_size; this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) | From b74e6985bfe81627b7b1adb8443220f98f89f9f4 Mon Sep 17 00:00:00 2001 From: Xiaolei Li Date: Mon, 24 Jun 2019 09:38:56 +0800 Subject: [PATCH 51/60] mtd: rawnand: mtk: Re-license MTK NAND driver as Dual MIT/GPL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It is wanted to use MTK NAND driver with GPL-2.0 or MIT license. But now it is only licensed as GPL-2.0, so re-license it as dual MIT/GPL. Signed-off-by: Xiaolei Li Acked-by: Jorge Ramirez-Ortiz Acked-by: Ryder Lee Acked-by: Rafał Miłecki Acked-by: Miquel Raynal Acked-by: Boris Brezillon Acked-by: Wen Yang Acked-by: Dan Carpenter Acked-by: Arnd Bergmann Acked-by: Masahiro Yamada Acked-by: RogerCC Lin Reviewed-by: Matthias Brugger Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/mtk_ecc.c | 4 ++-- drivers/mtd/nand/raw/mtk_ecc.h | 4 +--- drivers/mtd/nand/raw/mtk_nand.c | 4 ++-- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c index 0f90e060dae8..74595b644b7c 100644 --- a/drivers/mtd/nand/raw/mtk_ecc.c +++ b/drivers/mtd/nand/raw/mtk_ecc.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0-only +// SPDX-License-Identifier: GPL-2.0 OR MIT /* * MTK ECC controller driver. * Copyright (C) 2016 MediaTek Inc. 
@@ -596,4 +596,4 @@ module_platform_driver(mtk_ecc_driver); MODULE_AUTHOR("Xiaolei Li "); MODULE_DESCRIPTION("MTK Nand ECC Driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/mtd/nand/raw/mtk_ecc.h b/drivers/mtd/nand/raw/mtk_ecc.h index a455df080952..0e48c36e6ca0 100644 --- a/drivers/mtd/nand/raw/mtk_ecc.h +++ b/drivers/mtd/nand/raw/mtk_ecc.h @@ -1,12 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /* * MTK SDG1 ECC controller * * Copyright (c) 2016 Mediatek * Authors: Xiaolei Li * Jorge Ramirez-Ortiz - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. */ #ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__ diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index 47291bc53350..373d47d1ba4c 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0-only +// SPDX-License-Identifier: GPL-2.0 OR MIT /* * MTK NAND Flash controller driver. * Copyright (C) 2016 MediaTek Inc. @@ -1645,6 +1645,6 @@ static struct platform_driver mtk_nfc_driver = { module_platform_driver(mtk_nfc_driver); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("Dual MIT/GPL"); MODULE_AUTHOR("Xiaolei Li "); MODULE_DESCRIPTION("MTK Nand Flash Controller Driver"); From 3552691616c940a7c4125c2678ba816653cd725e Mon Sep 17 00:00:00 2001 From: Jeff Kletsky Date: Tue, 18 Jun 2019 10:08:05 -0700 Subject: [PATCH 52/60] mtd: spinand: Add initial support for Paragon PN26G0xA Add initial support for Paragon Technology PN26G01Axxxxx and PN26G02Axxxxx SPI NAND Datasheets available at http://www.xtxtech.com/upfile/2016082517274590.pdf http://www.xtxtech.com/upfile/2016082517282329.pdf Signed-off-by: Jeff Kletsky Reviewed-by: Frieder Schrempf Signed-off-by: Miquel Raynal --- drivers/mtd/nand/spi/Makefile | 2 +- drivers/mtd/nand/spi/core.c | 1 + drivers/mtd/nand/spi/paragon.c | 147 +++++++++++++++++++++++++++++++++ include/linux/mtd/spinand.h | 1 + 4 files changed, 150 insertions(+), 1 deletion(-) create mode 100644 drivers/mtd/nand/spi/paragon.c diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile index 753125082640..9662b9c1d5a9 100644 --- a/drivers/mtd/nand/spi/Makefile +++ b/drivers/mtd/nand/spi/Makefile @@ -1,3 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 -spinand-objs := core.o gigadevice.o macronix.o micron.o toshiba.o winbond.o +spinand-objs := core.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o obj-$(CONFIG_MTD_SPI_NAND) += spinand.o diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 556bfdb34455..f0f3528aab8f 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -757,6 +757,7 @@ static const struct spinand_manufacturer *spinand_manufacturers[] = { &gigadevice_spinand_manufacturer, ¯onix_spinand_manufacturer, µn_spinand_manufacturer, + ¶gon_spinand_manufacturer, &toshiba_spinand_manufacturer, &winbond_spinand_manufacturer, }; diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c new file mode 100644 index 000000000000..52307681cbd0 --- /dev/null +++ b/drivers/mtd/nand/spi/paragon.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Jeff Kletsky + * + * Author: Jeff Kletsky + */ + +#include +#include +#include + + +#define SPINAND_MFR_PARAGON 0xa1 + + +#define PN26G0XA_STATUS_ECC_BITMASK (3 << 4) + +#define 
PN26G0XA_STATUS_ECC_NONE_DETECTED (0 << 4) +#define PN26G0XA_STATUS_ECC_1_7_CORRECTED (1 << 4) +#define PN26G0XA_STATUS_ECC_ERRORED (2 << 4) +#define PN26G0XA_STATUS_ECC_8_CORRECTED (3 << 4) + + +static SPINAND_OP_VARIANTS(read_cache_variants, + SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); + +static SPINAND_OP_VARIANTS(write_cache_variants, + SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), + SPINAND_PROG_LOAD(true, 0, NULL, 0)); + +static SPINAND_OP_VARIANTS(update_cache_variants, + SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), + SPINAND_PROG_LOAD(false, 0, NULL, 0)); + + +static int pn26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + if (section > 3) + return -ERANGE; + + region->offset = 6 + (15 * section); /* 4 BBM + 2 user bytes */ + region->length = 13; + + return 0; +} + +static int pn26g0xa_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + if (section > 4) + return -ERANGE; + + if (section == 4) { + region->offset = 64; + region->length = 64; + } else { + region->offset = 4 + (15 * section); + region->length = 2; + } + + return 0; +} + +static int pn26g0xa_ecc_get_status(struct spinand_device *spinand, + u8 status) +{ + switch (status & PN26G0XA_STATUS_ECC_BITMASK) { + case PN26G0XA_STATUS_ECC_NONE_DETECTED: + return 0; + + case PN26G0XA_STATUS_ECC_1_7_CORRECTED: + return 7; /* Return upper limit by convention */ + + case PN26G0XA_STATUS_ECC_8_CORRECTED: + return 8; + + case PN26G0XA_STATUS_ECC_ERRORED: + return -EBADMSG; + + default: + break; + } + + return -EINVAL; +} + +static const struct mtd_ooblayout_ops pn26g0xa_ooblayout = { + .ecc = pn26g0xa_ooblayout_ecc, + .free = pn26g0xa_ooblayout_free, +}; + + +static const struct spinand_info paragon_spinand_table[] = { + SPINAND_INFO("PN26G01A", 0xe1, + NAND_MEMORG(1, 2048, 128, 64, 1024, 21, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&pn26g0xa_ooblayout, + pn26g0xa_ecc_get_status)), + SPINAND_INFO("PN26G02A", 0xe2, + NAND_MEMORG(1, 2048, 128, 64, 2048, 41, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&pn26g0xa_ooblayout, + pn26g0xa_ecc_get_status)), +}; + +static int paragon_spinand_detect(struct spinand_device *spinand) +{ + u8 *id = spinand->id.data; + int ret; + + /* Read ID returns [0][MID][DID] */ + + if (id[1] != SPINAND_MFR_PARAGON) + return 0; + + ret = spinand_match_and_init(spinand, paragon_spinand_table, + ARRAY_SIZE(paragon_spinand_table), + id[2]); + if (ret) + return ret; + + return 1; +} + +static const struct spinand_manufacturer_ops paragon_spinand_manuf_ops = { + .detect = paragon_spinand_detect, +}; + +const struct spinand_manufacturer paragon_spinand_manufacturer = { + .id = SPINAND_MFR_PARAGON, + .name = "Paragon", + .ops = ¶gon_spinand_manuf_ops, +}; diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index fbc0423bb4ae..4ea558bd3c46 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -227,6 +227,7 @@ struct spinand_manufacturer { extern const struct spinand_manufacturer 
gigadevice_spinand_manufacturer; extern const struct spinand_manufacturer macronix_spinand_manufacturer; extern const struct spinand_manufacturer micron_spinand_manufacturer; +extern const struct spinand_manufacturer paragon_spinand_manufacturer; extern const struct spinand_manufacturer toshiba_spinand_manufacturer; extern const struct spinand_manufacturer winbond_spinand_manufacturer; From b83408b580eccf8d2797cd6cb9ae42c2a28656a7 Mon Sep 17 00:00:00 2001 From: liaoweixiong Date: Fri, 28 Jun 2019 12:14:46 +0800 Subject: [PATCH 53/60] mtd: spinand: read returns badly if the last page has bitflips In case of the last page containing bitflips (ret > 0), spinand_mtd_read() will return that number of bitflips for the last page while it should instead return max_bitflips like it does when the last page read returns with 0. Signed-off-by: Weixiong Liao Reviewed-by: Boris Brezillon Reviewed-by: Frieder Schrempf Cc: stable@vger.kernel.org Fixes: 7529df465248 ("mtd: nand: Add core infrastructure to support SPI NANDs") Signed-off-by: Miquel Raynal --- drivers/mtd/nand/spi/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index f0f3528aab8f..89f6beefb01c 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -511,12 +511,12 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from, if (ret == -EBADMSG) { ecc_failed = true; mtd->ecc_stats.failed++; - ret = 0; } else { mtd->ecc_stats.corrected += ret; max_bitflips = max_t(unsigned int, max_bitflips, ret); } + ret = 0; ops->retlen += iter.req.datalen; ops->oobretlen += iter.req.ooblen; } From b07079f1642c28dac4f6f339d5aca66203519734 Mon Sep 17 00:00:00 2001 From: Vignesh Raghavendra Date: Tue, 25 Jun 2019 13:27:46 +0530 Subject: [PATCH 54/60] mtd: hyperbus: Add driver for TI's HyperBus memory controller Add driver for HyperBus memory controller on TI's AM654 SoC. Programming IP is pretty simple and provides direct memory mapped access to connected Flash devices. Add basic support for the IP without DMA. Second chip-select is not supported for now. Signed-off-by: Vignesh Raghavendra Signed-off-by: Miquel Raynal --- drivers/mtd/hyperbus/Kconfig | 12 +++ drivers/mtd/hyperbus/Makefile | 1 + drivers/mtd/hyperbus/hbmc-am654.c | 147 ++++++++++++++++++++++++++++++ 3 files changed, 160 insertions(+) create mode 100644 drivers/mtd/hyperbus/hbmc-am654.c diff --git a/drivers/mtd/hyperbus/Kconfig b/drivers/mtd/hyperbus/Kconfig index 98147e28caa0..cff6bbd226f5 100644 --- a/drivers/mtd/hyperbus/Kconfig +++ b/drivers/mtd/hyperbus/Kconfig @@ -9,3 +9,15 @@ menuconfig MTD_HYPERBUS the HyperBus Controller driver to communicate with HyperFlash. 
See Cypress HyperBus specification for more details + +if MTD_HYPERBUS + +config HBMC_AM654 + tristate "HyperBus controller driver for AM65x SoC" + select MULTIPLEXER + select MUX_MMIO + help + This is the driver for HyperBus controller on TI's AM65x and + other SoCs + +endif # MTD_HYPERBUS diff --git a/drivers/mtd/hyperbus/Makefile b/drivers/mtd/hyperbus/Makefile index ca61dedd730d..8a936e066f48 100644 --- a/drivers/mtd/hyperbus/Makefile +++ b/drivers/mtd/hyperbus/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_MTD_HYPERBUS) += hyperbus-core.o +obj-$(CONFIG_HBMC_AM654) += hbmc-am654.o diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c new file mode 100644 index 000000000000..08d543b124cd --- /dev/null +++ b/drivers/mtd/hyperbus/hbmc-am654.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ +// Author: Vignesh Raghavendra + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define AM654_HBMC_CALIB_COUNT 25 + +struct am654_hbmc_priv { + struct hyperbus_ctlr ctlr; + struct hyperbus_device hbdev; + struct mux_control *mux_ctrl; +}; + +static int am654_hbmc_calibrate(struct hyperbus_device *hbdev) +{ + struct map_info *map = &hbdev->map; + struct cfi_private cfi; + int count = AM654_HBMC_CALIB_COUNT; + int pass_count = 0; + int ret; + + cfi.interleave = 1; + cfi.device_type = CFI_DEVICETYPE_X16; + cfi_send_gen_cmd(0xF0, 0, 0, map, &cfi, cfi.device_type, NULL); + cfi_send_gen_cmd(0x98, 0x55, 0, map, &cfi, cfi.device_type, NULL); + + while (count--) { + ret = cfi_qry_present(map, 0, &cfi); + if (ret) + pass_count++; + else + pass_count = 0; + if (pass_count == 5) + break; + } + + cfi_qry_mode_off(0, map, &cfi); + + return ret; +} + +static const struct hyperbus_ops am654_hbmc_ops = { + .calibrate = am654_hbmc_calibrate, +}; + +static int am654_hbmc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct am654_hbmc_priv *priv; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + + if (of_property_read_bool(dev->of_node, "mux-controls")) { + struct mux_control *control = devm_mux_control_get(dev, NULL); + + if (IS_ERR(control)) + return PTR_ERR(control); + + ret = mux_control_select(control, 1); + if (ret) { + dev_err(dev, "Failed to select HBMC mux\n"); + return ret; + } + priv->mux_ctrl = control; + } + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + goto disable_pm; + } + + priv->ctlr.dev = dev; + priv->ctlr.ops = &am654_hbmc_ops; + priv->hbdev.ctlr = &priv->ctlr; + priv->hbdev.np = of_get_next_child(dev->of_node, NULL); + ret = hyperbus_register_device(&priv->hbdev); + if (ret) { + dev_err(dev, "failed to register controller\n"); + pm_runtime_put_sync(&pdev->dev); + goto disable_pm; + } + + return 0; +disable_pm: + pm_runtime_disable(dev); + if (priv->mux_ctrl) + mux_control_deselect(priv->mux_ctrl); + return ret; +} + +static int am654_hbmc_remove(struct platform_device *pdev) +{ + struct am654_hbmc_priv *priv = platform_get_drvdata(pdev); + int ret; + + ret = hyperbus_unregister_device(&priv->hbdev); + if (priv->mux_ctrl) + mux_control_deselect(priv->mux_ctrl); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return ret; +} + +static const struct of_device_id am654_hbmc_dt_ids[] = { + { + 
.compatible = "ti,am654-hbmc", + }, + { /* end of table */ } +}; + +MODULE_DEVICE_TABLE(of, am654_hbmc_dt_ids); + +static struct platform_driver am654_hbmc_platform_driver = { + .probe = am654_hbmc_probe, + .remove = am654_hbmc_remove, + .driver = { + .name = "hbmc-am654", + .of_match_table = am654_hbmc_dt_ids, + }, +}; + +module_platform_driver(am654_hbmc_platform_driver); + +MODULE_DESCRIPTION("HBMC driver for AM654 SoC"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:hbmc-am654"); +MODULE_AUTHOR("Vignesh Raghavendra "); From 17c929e1334ee0647ce9f3aba1d6bc645c1e5206 Mon Sep 17 00:00:00 2001 From: Fuqian Huang Date: Fri, 28 Jun 2019 10:48:13 +0800 Subject: [PATCH 55/60] mtd: rawnand: Use kzalloc() instead of kmalloc() and memset() Replace kmalloc() by a memset() followed with a kzalloc(). There is a recommendation to use zeroing allocator rather than allocator followed by memset(0) in ./scripts/coccinelle/api/alloc/zalloc-simple.cocci Signed-off-by: Fuqian Huang Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/nand_bch.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/nand_bch.c b/drivers/mtd/nand/raw/nand_bch.c index 55aa4c1cd414..17527310c3a1 100644 --- a/drivers/mtd/nand/raw/nand_bch.c +++ b/drivers/mtd/nand/raw/nand_bch.c @@ -170,7 +170,7 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd) goto fail; } - nbc->eccmask = kmalloc(eccbytes, GFP_KERNEL); + nbc->eccmask = kzalloc(eccbytes, GFP_KERNEL); nbc->errloc = kmalloc_array(t, sizeof(*nbc->errloc), GFP_KERNEL); if (!nbc->eccmask || !nbc->errloc) goto fail; @@ -182,7 +182,6 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd) goto fail; memset(erased_page, 0xff, eccsize); - memset(nbc->eccmask, 0, eccbytes); encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask); kfree(erased_page); From bce9437a0a48dd5e19490f56e1cdc39a9be5563c Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Fri, 28 Jun 2019 11:18:06 +0200 Subject: [PATCH 56/60] mtd: rawnand: stm32_fmc2: increase DMA completion timeouts When the system is overloaded, DMA data transfer completion occurs after 100ms. Increase the timeouts to let it the time to complete. 
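The change itself is mechanical: the completion waits that previously used a 100 ms bound now share the 1000 ms constant added below. For reference, a sketch of the resulting idiom at one of the DMA wait sites (wait_for_completion_timeout() returns 0 on timeout, hence the error path):

  #define FMC2_TIMEOUT_MS 1000

  if (!wait_for_completion_timeout(&fmc2->dma_data_complete,
                                   msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
          dev_err(fmc2->dev, "data DMA timeout\n");
          dmaengine_terminate_all(dma_ch);
          ret = -ETIMEDOUT;
  }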
Signed-off-by: Amelie Delaunay Signed-off-by: Christophe Kerello Signed-off-by: Miquel Raynal --- drivers/mtd/nand/raw/stm32_fmc2_nand.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 4aabea25cd7d..e63acc077c18 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -37,6 +37,8 @@ /* Max ECC buffer length */ #define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG) +#define FMC2_TIMEOUT_MS 1000 + /* Timings */ #define FMC2_THIZ 1 #define FMC2_TIO 8000 @@ -530,7 +532,8 @@ static int stm32_fmc2_ham_calculate(struct nand_chip *chip, const u8 *data, int ret; ret = readl_relaxed_poll_timeout(fmc2->io_base + FMC2_SR, - sr, sr & FMC2_SR_NWRF, 10, 1000); + sr, sr & FMC2_SR_NWRF, 10, + FMC2_TIMEOUT_MS); if (ret) { dev_err(fmc2->dev, "ham timeout\n"); return ret; @@ -611,7 +614,7 @@ static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data, /* Wait until the BCH code is ready */ if (!wait_for_completion_timeout(&fmc2->complete, - msecs_to_jiffies(1000))) { + msecs_to_jiffies(FMC2_TIMEOUT_MS))) { dev_err(fmc2->dev, "bch timeout\n"); stm32_fmc2_disable_bch_irq(fmc2); return -ETIMEDOUT; @@ -696,7 +699,7 @@ static int stm32_fmc2_bch_correct(struct nand_chip *chip, u8 *dat, /* Wait until the decoding error is ready */ if (!wait_for_completion_timeout(&fmc2->complete, - msecs_to_jiffies(1000))) { + msecs_to_jiffies(FMC2_TIMEOUT_MS))) { dev_err(fmc2->dev, "bch timeout\n"); stm32_fmc2_disable_bch_irq(fmc2); return -ETIMEDOUT; @@ -969,7 +972,7 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf, /* Wait end of sequencer transfer */ if (!wait_for_completion_timeout(&fmc2->complete, - msecs_to_jiffies(1000))) { + msecs_to_jiffies(FMC2_TIMEOUT_MS))) { dev_err(fmc2->dev, "seq timeout\n"); stm32_fmc2_disable_seq_irq(fmc2); dmaengine_terminate_all(dma_ch); @@ -981,7 +984,7 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf, /* Wait DMA data transfer completion */ if (!wait_for_completion_timeout(&fmc2->dma_data_complete, - msecs_to_jiffies(100))) { + msecs_to_jiffies(FMC2_TIMEOUT_MS))) { dev_err(fmc2->dev, "data DMA timeout\n"); dmaengine_terminate_all(dma_ch); ret = -ETIMEDOUT; @@ -990,7 +993,7 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf, /* Wait DMA ECC transfer completion */ if (!write_data && !raw) { if (!wait_for_completion_timeout(&fmc2->dma_ecc_complete, - msecs_to_jiffies(100))) { + msecs_to_jiffies(FMC2_TIMEOUT_MS))) { dev_err(fmc2->dev, "ECC DMA timeout\n"); dmaengine_terminate_all(fmc2->dma_ecc_ch); ret = -ETIMEDOUT; From 63898ab5159f6349f58b23cbf9cd06a824d88527 Mon Sep 17 00:00:00 2001 From: Ding Xiang Date: Wed, 29 May 2019 15:18:22 +0800 Subject: [PATCH 57/60] mtd: afs: remove unneeded NULL check NULL check before kfree is unneeded, so remove it. 
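For reference, the pattern being cleaned up (kfree() is a no-op when passed NULL, so the guard adds nothing):

  /* before: redundant NULL check */
  if (parts[i].name)
          kfree(parts[i].name);

  /* after */
  kfree(parts[i].name);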
Signed-off-by: Ding Xiang Signed-off-by: Richard Weinberger --- drivers/mtd/parsers/afs.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/mtd/parsers/afs.c b/drivers/mtd/parsers/afs.c index f24d768eee30..752b6cf005f7 100644 --- a/drivers/mtd/parsers/afs.c +++ b/drivers/mtd/parsers/afs.c @@ -371,8 +371,7 @@ static int parse_afs_partitions(struct mtd_info *mtd, out_free_parts: while (i >= 0) { - if (parts[i].name) - kfree(parts[i].name); + kfree(parts[i].name); i--; } kfree(parts); From df616d7a442b90798d63fbf4447154bbbb9040b1 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 27 Jun 2019 16:07:45 +0900 Subject: [PATCH 58/60] mtd: abi: do not use C++ style comments in uapi header Linux kernel tolerates C++ style comments these days. Actually, the SPDX License tags for .c files start with //. On the other hand, uapi headers are written in more strict C, where the C++ comment style is forbidden. Signed-off-by: Masahiro Yamada Acked-by: Richard Weinberger Signed-off-by: Richard Weinberger --- include/uapi/mtd/mtd-abi.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h index aff5b5e59845..47ffe3208c27 100644 --- a/include/uapi/mtd/mtd-abi.h +++ b/include/uapi/mtd/mtd-abi.h @@ -113,11 +113,11 @@ struct mtd_write_req { #define MTD_CAP_NVRAM (MTD_WRITEABLE | MTD_BIT_WRITEABLE | MTD_NO_ERASE) /* Obsolete ECC byte placement modes (used with obsolete MEMGETOOBSEL) */ -#define MTD_NANDECC_OFF 0 // Switch off ECC (Not recommended) -#define MTD_NANDECC_PLACE 1 // Use the given placement in the structure (YAFFS1 legacy mode) -#define MTD_NANDECC_AUTOPLACE 2 // Use the default placement scheme -#define MTD_NANDECC_PLACEONLY 3 // Use the given placement in the structure (Do not store ecc result on read) -#define MTD_NANDECC_AUTOPL_USR 4 // Use the given autoplacement scheme rather than using the default +#define MTD_NANDECC_OFF 0 /* Switch off ECC (Not recommended) */ +#define MTD_NANDECC_PLACE 1 /* Use the given placement in the structure (YAFFS1 legacy mode) */ +#define MTD_NANDECC_AUTOPLACE 2 /* Use the default placement scheme */ +#define MTD_NANDECC_PLACEONLY 3 /* Use the given placement in the structure (Do not store ecc result on read) */ +#define MTD_NANDECC_AUTOPL_USR 4 /* Use the given autoplacement scheme rather than using the default */ /* OTP mode selection */ #define MTD_OTP_OFF 0 From 6a08a2f12781ca788759ff73acf1f36f6ae3f9b7 Mon Sep 17 00:00:00 2001 From: Chris Packham Date: Thu, 23 May 2019 11:19:47 +1200 Subject: [PATCH 59/60] mtd: concat: refactor concat_lock/concat_unlock concat_lock() and concat_unlock() only differed in terms of the mtd_xx operation they called. Refactor them to use a common helper function and pass a boolean flag to indicate whether lock or unlock is needed. 
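In outline, the shared walker looks like the sketch below (condensed and lightly reformatted from the diff; the comments are new): it clips the requested range to each subdevice in turn and dispatches to mtd_lock() or mtd_unlock() depending on the flag.

  static int concat_xxlock(struct mtd_info *mtd, loff_t ofs, uint64_t len,
                           bool is_lock)
  {
          struct mtd_concat *concat = CONCAT(mtd);
          int i, err = -EINVAL;

          for (i = 0; i < concat->num_subdev; i++) {
                  struct mtd_info *subdev = concat->subdev[i];
                  uint64_t size;

                  if (ofs >= subdev->size) {
                          /* the range starts beyond this chip, skip it */
                          ofs -= subdev->size;
                          continue;
                  }

                  /* clip the request to what fits in this subdevice */
                  if (ofs + len > subdev->size)
                          size = subdev->size - ofs;
                  else
                          size = len;

                  err = is_lock ? mtd_lock(subdev, ofs, size)
                                : mtd_unlock(subdev, ofs, size);
                  if (err)
                          break;

                  len -= size;
                  if (len == 0)
                          break;

                  /* keep going from the start of the next subdevice */
                  err = -EINVAL;
                  ofs = 0;
          }

          return err;
  }

concat_lock() and concat_unlock() then reduce to one-line wrappers passing true or false.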
Signed-off-by: Chris Packham Signed-off-by: Richard Weinberger --- drivers/mtd/mtdconcat.c | 44 +++++++++++------------------------------ 1 file changed, 12 insertions(+), 32 deletions(-) diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c index 7324ff832b41..c9785f96070e 100644 --- a/drivers/mtd/mtdconcat.c +++ b/drivers/mtd/mtdconcat.c @@ -437,7 +437,8 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr) return err; } -static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +static int concat_xxlock(struct mtd_info *mtd, loff_t ofs, uint64_t len, + bool is_lock) { struct mtd_concat *concat = CONCAT(mtd); int i, err = -EINVAL; @@ -456,7 +457,10 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) else size = len; - err = mtd_lock(subdev, ofs, size); + if (is_lock) + err = mtd_lock(subdev, ofs, size); + else + err = mtd_unlock(subdev, ofs, size); if (err) break; @@ -471,38 +475,14 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) return err; } +static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ + return concat_xxlock(mtd, ofs, len, true); +} + static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { - struct mtd_concat *concat = CONCAT(mtd); - int i, err = 0; - - for (i = 0; i < concat->num_subdev; i++) { - struct mtd_info *subdev = concat->subdev[i]; - uint64_t size; - - if (ofs >= subdev->size) { - size = 0; - ofs -= subdev->size; - continue; - } - if (ofs + len > subdev->size) - size = subdev->size - ofs; - else - size = len; - - err = mtd_unlock(subdev, ofs, size); - if (err) - break; - - len -= size; - if (len == 0) - break; - - err = -EINVAL; - ofs = 0; - } - - return err; + return concat_xxlock(mtd, ofs, len, false); } static void concat_sync(struct mtd_info *mtd) From 3bb4bba79254be87c86ef416d6b7f69882e6a02c Mon Sep 17 00:00:00 2001 From: Chris Packham Date: Thu, 23 May 2019 11:19:48 +1200 Subject: [PATCH 60/60] mtd: concat: implement _is_locked mtd operation Add an implementation of the _is_locked operation for concatenated mtd devices. This doesn't handle getting the lock status of a range that spans chips, which is consistent with cfi_ppb_is_locked and cfi_intelext_is_locked which only look at the first block in the range. 
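Once the hook is wired up, callers can query lock state through the normal mtd API: mtd_is_locked() returns a negative errno on failure (-EOPNOTSUPP when the hook is missing, as it was for concat devices before this patch) and otherwise a non-negative value, typically 1 for locked and 0 for unlocked on CFI-backed flash. A hedged usage sketch — maybe_unlock_region() is a hypothetical caller, not part of the patch:

#include <linux/mtd/mtd.h>

/* Unlock a region only if the chip reports its first block as locked. */
static int maybe_unlock_region(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret = mtd_is_locked(mtd, ofs, len);

	if (ret < 0)
		return ret;	/* no hook, or the chip reported an error */
	if (ret > 0)
		return mtd_unlock(mtd, ofs, len);
	return 0;		/* already unlocked */
}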
Signed-off-by: Chris Packham Signed-off-by: Richard Weinberger --- drivers/mtd/mtdconcat.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c index c9785f96070e..170a7221b35f 100644 --- a/drivers/mtd/mtdconcat.c +++ b/drivers/mtd/mtdconcat.c @@ -485,6 +485,28 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) return concat_xxlock(mtd, ofs, len, false); } +static int concat_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ + struct mtd_concat *concat = CONCAT(mtd); + int i, err = -EINVAL; + + for (i = 0; i < concat->num_subdev; i++) { + struct mtd_info *subdev = concat->subdev[i]; + + if (ofs >= subdev->size) { + ofs -= subdev->size; + continue; + } + + if (ofs + len > subdev->size) + break; + + return mtd_is_locked(subdev, ofs, len); + } + + return err; +} + static void concat_sync(struct mtd_info *mtd) { struct mtd_concat *concat = CONCAT(mtd); @@ -684,6 +706,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c concat->mtd._sync = concat_sync; concat->mtd._lock = concat_lock; concat->mtd._unlock = concat_unlock; + concat->mtd._is_locked = concat_is_locked; concat->mtd._suspend = concat_suspend; concat->mtd._resume = concat_resume;
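For completeness, a hedged sketch of how board code might exercise the new hook; combine_and_check() and the "board-nor" name are made up for illustration, while mtd_concat_create() and mtd_is_locked() are the existing interfaces being used:

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

/*
 * Concatenate two NOR chips, then probe the lock state of a block that
 * lives entirely on the second chip (ranges spanning chips are not
 * handled, as noted above).
 */
static struct mtd_info *combine_and_check(struct mtd_info *chips[2])
{
	struct mtd_info *concat = mtd_concat_create(chips, 2, "board-nor");

	if (!concat)
		return NULL;

	if (mtd_is_locked(concat, chips[0]->size, chips[1]->erasesize) > 0)
		pr_info("first block of the second chip is locked\n");

	return concat;
}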