diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 150772395cc6..372e921389c8 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -20,6 +20,7 @@
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/log2.h>
+#include <linux/mmc/pm.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
 #include <linux/amba/bus.h>
@@ -59,6 +60,7 @@ static unsigned int fmax = 515633;
  * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
  * @pwrreg_powerup: power up value for MMCIPOWER register
  * @signal_direction: input/out direction of bus signals can be indicated
+ * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
  */
 struct variant_data {
         unsigned int clkreg;
@@ -71,6 +73,7 @@ struct variant_data {
         bool blksz_datactrl16;
         u32 pwrreg_powerup;
         bool signal_direction;
+        bool pwrreg_clkgate;
 };
 
 static struct variant_data variant_arm = {
@@ -87,6 +90,14 @@ static struct variant_data variant_arm_extended_fifo = {
         .pwrreg_powerup = MCI_PWR_UP,
 };
 
+static struct variant_data variant_arm_extended_fifo_hwfc = {
+        .fifosize = 128 * 4,
+        .fifohalfsize = 64 * 4,
+        .clkreg_enable = MCI_ARM_HWFCEN,
+        .datalength_bits = 16,
+        .pwrreg_powerup = MCI_PWR_UP,
+};
+
 static struct variant_data variant_u300 = {
         .fifosize = 16 * 4,
         .fifohalfsize = 8 * 4,
@@ -95,6 +106,7 @@ static struct variant_data variant_u300 = {
         .sdio = true,
         .pwrreg_powerup = MCI_PWR_ON,
         .signal_direction = true,
+        .pwrreg_clkgate = true,
 };
 
 static struct variant_data variant_nomadik = {
@@ -106,6 +118,7 @@ static struct variant_data variant_nomadik = {
         .st_clkdiv = true,
         .pwrreg_powerup = MCI_PWR_ON,
         .signal_direction = true,
+        .pwrreg_clkgate = true,
 };
 
 static struct variant_data variant_ux500 = {
@@ -118,6 +131,7 @@ static struct variant_data variant_ux500 = {
         .st_clkdiv = true,
         .pwrreg_powerup = MCI_PWR_ON,
         .signal_direction = true,
+        .pwrreg_clkgate = true,
 };
 
 static struct variant_data variant_ux500v2 = {
@@ -131,8 +145,27 @@ static struct variant_data variant_ux500v2 = {
         .blksz_datactrl16 = true,
         .pwrreg_powerup = MCI_PWR_ON,
         .signal_direction = true,
+        .pwrreg_clkgate = true,
 };
 
+/*
+ * Validate mmc prerequisites
+ */
+static int mmci_validate_data(struct mmci_host *host,
+                              struct mmc_data *data)
+{
+        if (!data)
+                return 0;
+
+        if (!is_power_of_2(data->blksz)) {
+                dev_err(mmc_dev(host->mmc),
+                        "unsupported block size (%d bytes)\n", data->blksz);
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
 /*
  * This must be called with host->lock held
  */
@@ -202,6 +235,9 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
         if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
                 clk |= MCI_ST_8BIT_BUS;
 
+        if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+                clk |= MCI_ST_UX500_NEG_EDGE;
+
         mmci_write_clkreg(host, clk);
 }
 
@@ -352,10 +388,33 @@ static inline void mmci_dma_release(struct mmci_host *host)
         host->dma_rx_channel = host->dma_tx_channel = NULL;
 }
 
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+        dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+        dmaengine_terminate_all(host->dma_current);
+        host->dma_current = NULL;
+        host->dma_desc_current = NULL;
+        host->data->host_cookie = 0;
+}
+
 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 {
-        struct dma_chan *chan = host->dma_current;
+        struct dma_chan *chan;
         enum dma_data_direction dir;
+
+        if (data->flags & MMC_DATA_READ) {
+                dir = DMA_FROM_DEVICE;
+                chan = host->dma_rx_channel;
+        } else {
+                dir = DMA_TO_DEVICE;
+                chan = host->dma_tx_channel;
+        }
+
+        dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+}
+
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
         u32 status;
         int i;
 
@@ -374,19 +433,13 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
          * contiguous buffers. On TX, we'll get a FIFO underrun error.
          */
         if (status & MCI_RXDATAAVLBLMASK) {
-                dmaengine_terminate_all(chan);
+                mmci_dma_data_error(host);
                 if (!data->error)
                         data->error = -EIO;
         }
 
-        if (data->flags & MMC_DATA_WRITE) {
-                dir = DMA_TO_DEVICE;
-        } else {
-                dir = DMA_FROM_DEVICE;
-        }
-
         if (!data->host_cookie)
-                dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+                mmci_dma_unmap(host, data);
 
         /*
          * Use of DMA with scatter-gather is impossible.
@@ -396,16 +449,15 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
                 dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
                 mmci_dma_release(host);
         }
+
+        host->dma_current = NULL;
+        host->dma_desc_current = NULL;
 }
 
-static void mmci_dma_data_error(struct mmci_host *host)
-{
-        dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
-        dmaengine_terminate_all(host->dma_current);
-}
-
-static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
-                              struct mmci_host_next *next)
+/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
+static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+                                struct dma_chan **dma_chan,
+                                struct dma_async_tx_descriptor **dma_desc)
 {
         struct variant_data *variant = host->variant;
         struct dma_slave_config conf = {
@@ -423,16 +475,6 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
         enum dma_data_direction buffer_dirn;
         int nr_sg;
 
-        /* Check if next job is already prepared */
-        if (data->host_cookie && !next &&
-            host->dma_current && host->dma_desc_current)
-                return 0;
-
-        if (!next) {
-                host->dma_current = NULL;
-                host->dma_desc_current = NULL;
-        }
-
         if (data->flags & MMC_DATA_READ) {
                 conf.direction = DMA_DEV_TO_MEM;
                 buffer_dirn = DMA_FROM_DEVICE;
@@ -462,29 +504,41 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
         if (!desc)
                 goto unmap_exit;
 
-        if (next) {
-                next->dma_chan = chan;
-                next->dma_desc = desc;
-        } else {
-                host->dma_current = chan;
-                host->dma_desc_current = desc;
-        }
+        *dma_chan = chan;
+        *dma_desc = desc;
 
         return 0;
 
  unmap_exit:
-        if (!next)
-                dmaengine_terminate_all(chan);
         dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
         return -ENOMEM;
 }
 
+static inline int mmci_dma_prep_data(struct mmci_host *host,
+                                     struct mmc_data *data)
+{
+        /* Check if next job is already prepared. */
+        if (host->dma_current && host->dma_desc_current)
+                return 0;
+
+        /* No job were prepared thus do it now. */
+        return __mmci_dma_prep_data(host, data, &host->dma_current,
+                                    &host->dma_desc_current);
+}
+
+static inline int mmci_dma_prep_next(struct mmci_host *host,
+                                     struct mmc_data *data)
+{
+        struct mmci_host_next *nd = &host->next_data;
+        return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
+}
+
 static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
 {
         int ret;
         struct mmc_data *data = host->data;
 
-        ret = mmci_dma_prep_data(host, host->data, NULL);
+        ret = mmci_dma_prep_data(host, host->data);
         if (ret)
                 return ret;
 
@@ -514,19 +568,11 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
 {
         struct mmci_host_next *next = &host->next_data;
 
-        if (data->host_cookie && data->host_cookie != next->cookie) {
-                pr_warning("[%s] invalid cookie: data->host_cookie %d"
-                       " host->next_data.cookie %d\n",
-                       __func__, data->host_cookie, host->next_data.cookie);
-                data->host_cookie = 0;
-        }
-
-        if (!data->host_cookie)
-                return;
+        WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
+        WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));
 
         host->dma_desc_current = next->dma_desc;
         host->dma_current = next->dma_chan;
-
         next->dma_desc = NULL;
         next->dma_chan = NULL;
 }
@@ -541,19 +587,13 @@ static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
         if (!data)
                 return;
 
-        if (data->host_cookie) {
-                data->host_cookie = 0;
-                return;
-        }
+        BUG_ON(data->host_cookie);
 
-        /* if config for dma */
-        if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
-            ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
-                if (mmci_dma_prep_data(host, data, nd))
-                        data->host_cookie = 0;
-                else
-                        data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
-        }
+        if (mmci_validate_data(host, data))
+                return;
+
+        if (!mmci_dma_prep_next(host, data))
+                data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
 }
 
 static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
@@ -561,29 +601,23 @@
 {
         struct mmci_host *host = mmc_priv(mmc);
         struct mmc_data *data = mrq->data;
-        struct dma_chan *chan;
-        enum dma_data_direction dir;
 
-        if (!data)
+        if (!data || !data->host_cookie)
                 return;
 
-        if (data->flags & MMC_DATA_READ) {
-                dir = DMA_FROM_DEVICE;
-                chan = host->dma_rx_channel;
-        } else {
-                dir = DMA_TO_DEVICE;
-                chan = host->dma_tx_channel;
-        }
+        mmci_dma_unmap(host, data);
+        if (err) {
+                struct mmci_host_next *next = &host->next_data;
+                struct dma_chan *chan;
+                if (data->flags & MMC_DATA_READ)
+                        chan = host->dma_rx_channel;
+                else
+                        chan = host->dma_tx_channel;
+                dmaengine_terminate_all(chan);
 
-        /* if config for dma */
-        if (chan) {
-                if (err)
-                        dmaengine_terminate_all(chan);
-                if (data->host_cookie)
-                        dma_unmap_sg(mmc_dev(host->mmc), data->sg,
-                                     data->sg_len, dir);
-                mrq->data->host_cookie = 0;
+                next->dma_desc = NULL;
+                next->dma_chan = NULL;
         }
 }
 
@@ -604,6 +638,11 @@ static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 {
 }
 
+static inline void mmci_dma_finalize(struct mmci_host *host,
+                                     struct mmc_data *data)
+{
+}
+
 static inline void mmci_dma_data_error(struct mmci_host *host)
 {
 }
@@ -680,6 +719,9 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
                 mmci_write_clkreg(host, clk);
         }
+        if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+                datactrl |= MCI_ST_DPSM_DDRMODE;
+
         /*
          * Attempt to use DMA operation mode, if this
          * should fail, fall back to PIO mode
         */
@@ -751,8 +793,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
                 u32 remain, success;
 
                 /* Terminate the DMA transfer */
-                if (dma_inprogress(host))
+                if (dma_inprogress(host)) {
                         mmci_dma_data_error(host);
+                        mmci_dma_unmap(host, data);
+                }
 
                 /*
                  * Calculate how far we are into the transfer. Note that
@@ -791,7 +835,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 
         if (status & MCI_DATAEND || data->error) {
                 if (dma_inprogress(host))
-                        mmci_dma_unmap(host, data);
+                        mmci_dma_finalize(host, data);
                 mmci_stop_data(host);
 
                 if (!data->error)
@@ -828,8 +872,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
         if (!cmd->data || cmd->error) {
                 if (host->data) {
                         /* Terminate the DMA transfer */
-                        if (dma_inprogress(host))
+                        if (dma_inprogress(host)) {
                                 mmci_dma_data_error(host);
+                                mmci_dma_unmap(host, host->data);
+                        }
                         mmci_stop_data(host);
                 }
                 mmci_request_end(host, cmd->mrq);
@@ -1055,10 +1101,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
         WARN_ON(host->mrq != NULL);
 
-        if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
-                dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
-                        mrq->data->blksz);
-                mrq->cmd->error = -EINVAL;
+        mrq->cmd->error = mmci_validate_data(host, mrq->data);
+        if (mrq->cmd->error) {
                 mmc_request_done(mmc, mrq);
                 return;
         }
@@ -1086,7 +1130,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
         struct variant_data *variant = host->variant;
         u32 pwr = 0;
         unsigned long flags;
-        int ret;
 
         pm_runtime_get_sync(mmc_dev(mmc));
 
@@ -1096,23 +1139,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
         switch (ios->power_mode) {
         case MMC_POWER_OFF:
-                if (host->vcc)
-                        ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
+                if (!IS_ERR(mmc->supply.vmmc))
+                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
                 break;
         case MMC_POWER_UP:
-                if (host->vcc) {
-                        ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
-                        if (ret) {
-                                dev_err(mmc_dev(mmc), "unable to set OCR\n");
-                                /*
-                                 * The .set_ios() function in the mmc_host_ops
-                                 * struct return void, and failing to set the
-                                 * power should be rare so we print an error
-                                 * and return here.
-                                 */
-                                goto out;
-                        }
-                }
+                if (!IS_ERR(mmc->supply.vmmc))
+                        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+
                 /*
                  * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
                  * and instead uses MCI_PWR_ON so apply whatever value is
@@ -1154,6 +1187,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                 }
         }
 
+        /*
+         * If clock = 0 and the variant requires the MMCIPOWER to be used for
+         * gating the clock, the MCI_PWR_ON bit is cleared.
+         */
+        if (!ios->clock && variant->pwrreg_clkgate)
+                pwr &= ~MCI_PWR_ON;
+
         spin_lock_irqsave(&host->lock, flags);
 
         mmci_set_clkreg(host, ios->clock);
@@ -1161,7 +1201,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
         spin_unlock_irqrestore(&host->lock, flags);
 
- out:
         pm_runtime_mark_last_busy(mmc_dev(mmc));
         pm_runtime_put_autosuspend(mmc_dev(mmc));
 }
@@ -1384,32 +1423,19 @@ static int mmci_probe(struct amba_device *dev,
         } else
                 dev_warn(&dev->dev, "could not get default pinstate\n");
 
-#ifdef CONFIG_REGULATOR
-        /* If we're using the regulator framework, try to fetch a regulator */
-        host->vcc = regulator_get(&dev->dev, "vmmc");
-        if (IS_ERR(host->vcc))
-                host->vcc = NULL;
-        else {
-                int mask = mmc_regulator_get_ocrmask(host->vcc);
-
-                if (mask < 0)
-                        dev_err(&dev->dev, "error getting OCR mask (%d)\n",
-                                mask);
-                else {
-                        host->mmc->ocr_avail = (u32) mask;
-                        if (plat->ocr_mask)
-                                dev_warn(&dev->dev,
-                                 "Provided ocr_mask/setpower will not be used "
-                                 "(using regulator instead)\n");
-                }
-        }
-#endif
-        /* Fall back to platform data if no regulator is found */
-        if (host->vcc == NULL)
+        /* Get regulators and the supported OCR mask */
+        mmc_regulator_get_supply(mmc);
+        if (!mmc->ocr_avail)
                 mmc->ocr_avail = plat->ocr_mask;
+        else if (plat->ocr_mask)
+                dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
+
         mmc->caps = plat->capabilities;
         mmc->caps2 = plat->capabilities2;
 
+        /* We support these PM capabilities. */
+        mmc->pm_caps = MMC_PM_KEEP_POWER;
+
         /*
          * We can do SGIO
          */
@@ -1585,10 +1611,6 @@ static int mmci_remove(struct amba_device *dev)
                 clk_disable_unprepare(host->clk);
                 clk_put(host->clk);
 
-                if (host->vcc)
-                        mmc_regulator_set_ocr(mmc, host->vcc, 0);
-                regulator_put(host->vcc);
-
                 mmc_free_host(mmc);
 
                 amba_release_regions(dev);
@@ -1636,8 +1658,37 @@ static int mmci_resume(struct device *dev)
 }
 #endif
 
+#ifdef CONFIG_PM_RUNTIME
+static int mmci_runtime_suspend(struct device *dev)
+{
+        struct amba_device *adev = to_amba_device(dev);
+        struct mmc_host *mmc = amba_get_drvdata(adev);
+
+        if (mmc) {
+                struct mmci_host *host = mmc_priv(mmc);
+                clk_disable_unprepare(host->clk);
+        }
+
+        return 0;
+}
+
+static int mmci_runtime_resume(struct device *dev)
+{
+        struct amba_device *adev = to_amba_device(dev);
+        struct mmc_host *mmc = amba_get_drvdata(adev);
+
+        if (mmc) {
+                struct mmci_host *host = mmc_priv(mmc);
+                clk_prepare_enable(host->clk);
+        }
+
+        return 0;
+}
+#endif
+
 static const struct dev_pm_ops mmci_dev_pm_ops = {
         SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
+        SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
 };
 
 static struct amba_id mmci_ids[] = {
@@ -1651,6 +1702,11 @@ static struct amba_id mmci_ids[] = {
                 .mask = 0xff0fffff,
                 .data = &variant_arm_extended_fifo,
         },
+        {
+                .id = 0x02041180,
+                .mask = 0xff0fffff,
+                .data = &variant_arm_extended_fifo_hwfc,
+        },
         {
                 .id = 0x00041181,
                 .mask = 0x000fffff,
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index d34d8c0add8e..1f33ad5333a0 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -28,6 +28,8 @@
 #define MCI_ST_UX500_NEG_EDGE (1 << 13)
 #define MCI_ST_UX500_HWFCEN (1 << 14)
 #define MCI_ST_UX500_CLK_INV (1 << 15)
+/* Modified PL180 on Versatile Express platform */
+#define MCI_ARM_HWFCEN (1 << 12)
 
 #define MMCIARGUMENT 0x008
 #define MMCICOMMAND 0x00c
@@ -193,7 +195,6 @@ struct mmci_host {
         /* pio stuff */
         struct sg_mapping_iter sg_miter;
         unsigned int size;
-        struct regulator *vcc;
 
         /* pinctrl handles */
         struct pinctrl *pinctrl;