mtd: sh_flctl: Group sector accesses into a single transfer

When we use hardware ECC, the FLCTL is run in so-called "sector access
mode". With 2KiB page sizes we can bundle the 4 sector accesses into a
single transfer and read or write a whole page at once, which speeds
things up.
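
In outline, the patch hoists the per-sector controller setup out of the
transfer loop so the FLCTL is programmed once per page. A rough sketch of
the before/after control flow (simplified; program_transfer() and
transfer_sector() are illustrative stand-ins, not functions in the driver):

	/* Before: one FLCTL transaction per 512-byte sector.
	 * program_transfer()/transfer_sector() are illustrative helpers,
	 * not driver API. */
	for (sector = 0; sector < page_sectors; sector++) {
		program_transfer(flctl, page_addr << 2 | sector, 1);
		start_translation(flctl);
		transfer_sector(flctl, sector);	/* FIFO + ECC data */
		wait_completion(flctl);
	}

	/* After: a single transaction covering all sectors of the page. */
	program_transfer(flctl, page_addr << 2, page_sectors);
	start_translation(flctl);
	for (sector = 0; sector < page_sectors; sector++)
		transfer_sector(flctl, sector);
	wait_completion(flctl);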

Signed-off-by: Bastian Hecht <hechtb@gmail.com>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Authored by Bastian Hecht on 2012-05-14 14:14:45 +02:00; committed by David Woodhouse
parent 50ed399cc3
commit 623c55caa3

@@ -368,25 +368,21 @@ static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
 	struct sh_flctl *flctl = mtd_to_flctl(mtd);
 	int sector, page_sectors;
 
-	if (flctl->page_size)
-		page_sectors = 4;
-	else
-		page_sectors = 1;
-
-	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
-			FLCMNCR(flctl));
+	page_sectors = flctl->page_size ? 4 : 1;
 
 	set_cmd_regs(mtd, NAND_CMD_READ0,
 			(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
 
+	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
+			FLCMNCR(flctl));
+	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
+	writel(page_addr << 2, FLADR(flctl));
+
+	empty_fifo(flctl);
+	start_translation(flctl);
+
 	for (sector = 0; sector < page_sectors; sector++) {
 		int ret;
-
-		empty_fifo(flctl);
-		writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl));
-		writel(page_addr << 2 | sector, FLADR(flctl));
-
-		start_translation(flctl);
 		read_fiforeg(flctl, 512, 512 * sector);
 
 		ret = read_ecfiforeg(flctl,
@@ -397,8 +393,10 @@ static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
 			flctl->hwecc_cant_correct[sector] = 1;
 
 		writel(0x0, FL4ECCCR(flctl));
-		wait_completion(flctl);
 	}
+
+	wait_completion(flctl);
+
 	writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
 			FLCMNCR(flctl));
 }
@@ -430,31 +428,27 @@ static void execmd_write_page_sector(struct mtd_info *mtd)
 	int i, page_addr = flctl->seqin_page_addr;
 	int sector, page_sectors;
 
-	if (flctl->page_size)
-		page_sectors = 4;
-	else
-		page_sectors = 1;
-
-	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
+	page_sectors = flctl->page_size ? 4 : 1;
 
 	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
 			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
 
-	for (sector = 0; sector < page_sectors; sector++) {
-		empty_fifo(flctl);
-		writel(readl(FLCMDCR(flctl)) | 1, FLCMDCR(flctl));
-		writel(page_addr << 2 | sector, FLADR(flctl));
+	empty_fifo(flctl);
+	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
+	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
+	writel(page_addr << 2, FLADR(flctl));
 
-		start_translation(flctl);
+	start_translation(flctl);
+
+	for (sector = 0; sector < page_sectors; sector++) {
 		write_fiforeg(flctl, 512, 512 * sector);
 
 		for (i = 0; i < 4; i++) {
 			wait_wecfifo_ready(flctl); /* wait for write ready */
 			writel(0xFFFFFFFF, FLECFIFO(flctl));
 		}
-		wait_completion(flctl);
 	}
+
+	wait_completion(flctl);
+
 	writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
 }