1
0
Fork 0

ARM: imx: add suspend/resume with FastMix OFF support

This patch adds suspend/resume with FastMix OFF support.

Signed-off-by: Anson Huang <Anson.Huang@nxp.com>
5.4-rM2-2.2.x-imx-squashed
Anson Huang 2019-04-18 12:54:29 +08:00 committed by Dong Aisheng
parent 1f6b6e8a89
commit 4d546d4a1e
11 changed files with 2072 additions and 254 deletions

View File

@ -50,6 +50,9 @@ config HAVE_IMX_DDRC
config HAVE_IMX_BUSFREQ
bool
config HAVE_IMX_MU
bool
config HAVE_IMX_SRC
def_bool y if SMP
select ARCH_HAS_RESET_CONTROLLER
@ -519,6 +522,8 @@ config SOC_IMX6SX
bool "i.MX6 SoloX support"
select PINCTRL_IMX6SX
select SOC_IMX6
select HAVE_IMX_MU
select KEYBOARD_SNVS_PWRKEY
help
This enables support for Freescale i.MX6 SoloX processor.
@ -554,6 +559,8 @@ config SOC_IMX7D_CA7
select HAVE_IMX_SRC
select IMX_GPCV2
select HAVE_IMX_DDRC
select HAVE_IMX_MU
select KEYBOARD_SNVS_PWRKEY
config SOC_IMX7D_CM4
bool

View File

@ -73,6 +73,7 @@ obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o
obj-$(CONFIG_HAVE_IMX_SRC) += src.o
obj-$(CONFIG_HAVE_IMX_DDRC) += ddrc.o
obj-$(CONFIG_HAVE_IMX_MU) += mu.o
ifneq ($(CONFIG_SOC_IMX6)$(CONFIG_SOC_LS1021A),)
AFLAGS_headsmp.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SMP) += headsmp.o platsmp.o

View File

@ -4,6 +4,7 @@
* Copyright 2017-2018 NXP.
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
@ -16,38 +17,63 @@
#define REG_SET 0x4
#define REG_CLR 0x8
#define ANADIG_ARM_PLL 0x60
#define ANADIG_DDR_PLL 0x70
#define ANADIG_SYS_PLL 0xb0
#define ANADIG_ENET_PLL 0xe0
#define ANADIG_AUDIO_PLL 0xf0
#define ANADIG_VIDEO_PLL 0x130
#define ANADIG_REG_2P5 0x130
#define ANADIG_REG_CORE 0x140
#define ANADIG_ANA_MISC0 0x150
#define ANADIG_USB1_CHRG_DETECT 0x1b0
#define ANADIG_USB2_CHRG_DETECT 0x210
#define ANADIG_ANA_MISC2 0x170
#define ANADIG_DIGPROG 0x260
#define ANADIG_DIGPROG_IMX6SL 0x280
#define ANADIG_DIGPROG_IMX7D 0x800
#define SRC_SBMR2 0x1c
#define BM_ANADIG_REG_2P5_ENABLE_WEAK_LINREG 0x40000
#define BM_ANADIG_REG_2P5_ENABLE_PULLDOWN 0x8
#define BM_ANADIG_REG_CORE_FET_ODRIVE 0x20000000
#define BM_ANADIG_REG_CORE_REG1 (0x1f << 9)
#define BM_ANADIG_REG_CORE_REG2 (0x1f << 18)
#define BP_ANADIG_REG_CORE_REG2 (18)
#define BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG 0x1000
#define BM_ANADIG_ANA_MISC0_V2_STOP_MODE_CONFIG 0x800
#define BM_ANADIG_ANA_MISC0_V3_STOP_MODE_CONFIG 0xc00
#define BM_ANADIG_ANA_MISC2_REG1_STEP_TIME (0x3 << 26)
#define BP_ANADIG_ANA_MISC2_REG1_STEP_TIME (26)
/* Below MISC0_DISCON_HIGH_SNVS is only for i.MX6SL */
#define BM_ANADIG_ANA_MISC0_DISCON_HIGH_SNVS 0x2000
/* Since i.MX6SX, DISCON_HIGH_SNVS is changed to bit 12 */
#define BM_ANADIG_ANA_MISC0_V2_DISCON_HIGH_SNVS 0x1000
#define BM_ANADIG_USB_CHRG_DETECT_CHK_CHRG_B 0x80000
#define BM_ANADIG_USB_CHRG_DETECT_EN_B 0x100000
#define LDO_RAMP_UP_UNIT_IN_CYCLES 64 /* 64 cycles per step */
#define LDO_RAMP_UP_FREQ_IN_MHZ 24 /* cycle based on 24M OSC */
static struct regmap *anatop;
static void imx_anatop_enable_weak2p5(bool enable)
{
u32 reg, val;
u32 reg, val, mask;
regmap_read(anatop, ANADIG_ANA_MISC0, &val);
if (cpu_is_imx6sx() || cpu_is_imx6ul() || cpu_is_imx6ull()
|| cpu_is_imx6sll())
mask = BM_ANADIG_ANA_MISC0_V3_STOP_MODE_CONFIG;
else if (cpu_is_imx6sl())
mask = BM_ANADIG_ANA_MISC0_V2_STOP_MODE_CONFIG;
else
mask = BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG;
/* can only be enabled when stop_mode_config is clear. */
reg = ANADIG_REG_2P5;
reg += (enable && (val & BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG) == 0) ?
REG_SET : REG_CLR;
reg += (enable && (val & mask) == 0) ? REG_SET : REG_CLR;
regmap_write(anatop, reg, BM_ANADIG_REG_2P5_ENABLE_WEAK_LINREG);
}
@ -65,35 +91,89 @@ static inline void imx_anatop_enable_2p5_pulldown(bool enable)
static inline void imx_anatop_disconnect_high_snvs(bool enable)
{
regmap_write(anatop, ANADIG_ANA_MISC0 + (enable ? REG_SET : REG_CLR),
BM_ANADIG_ANA_MISC0_DISCON_HIGH_SNVS);
if (cpu_is_imx6sx() || cpu_is_imx6ul() || cpu_is_imx6ull() ||
cpu_is_imx6sll())
regmap_write(anatop, ANADIG_ANA_MISC0 +
(enable ? REG_SET : REG_CLR),
BM_ANADIG_ANA_MISC0_V2_DISCON_HIGH_SNVS);
else
regmap_write(anatop, ANADIG_ANA_MISC0 +
(enable ? REG_SET : REG_CLR),
BM_ANADIG_ANA_MISC0_DISCON_HIGH_SNVS);
}
/*
 * Gate or restore the VDDPU (GPU/VPU power domain) LDO in the anatop.
 *
 * @off: true  -> clear the REG1 voltage-target field, switching the PU
 *                LDO off.
 *       false -> copy the current VDDSOC target (REG2) into the VDDPU
 *                field (REG1) so PU tracks SOC, then busy-wait for the
 *                LDO to ramp up before callers re-enable the domain.
 */
static void imx_anatop_disable_pu(bool off)
{
	u32 val, soc, delay;

	if (off) {
		/* REG1 = 0 powers the PU LDO down */
		regmap_read(anatop, ANADIG_REG_CORE, &val);
		val &= ~BM_ANADIG_REG_CORE_REG1;
		regmap_write(anatop, ANADIG_REG_CORE, val);
	} else {
		/* track vddpu with vddsoc */
		regmap_read(anatop, ANADIG_REG_CORE, &val);
		soc = val & BM_ANADIG_REG_CORE_REG2;
		val &= ~BM_ANADIG_REG_CORE_REG1;
		/* REG2 sits at bit 18, REG1 at bit 9: shift right by 9 */
		val |= soc >> 9;
		regmap_write(anatop, ANADIG_REG_CORE, val);
		/* wait PU LDO ramp */
		regmap_read(anatop, ANADIG_ANA_MISC2, &val);
		val &= BM_ANADIG_ANA_MISC2_REG1_STEP_TIME;
		val >>= BP_ANADIG_ANA_MISC2_REG1_STEP_TIME;
		/*
		 * delay(us) = steps * cycles-per-step / 24MHz, rounded up;
		 * cycles-per-step doubles with each STEP_TIME increment.
		 */
		delay = (soc >> BP_ANADIG_REG_CORE_REG2) *
			(LDO_RAMP_UP_UNIT_IN_CYCLES << val) /
			LDO_RAMP_UP_FREQ_IN_MHZ + 1;
		udelay(delay);
	}
}
void imx_anatop_pre_suspend(void)
{
if (imx_mmdc_get_ddr_type() == IMX_DDR_TYPE_LPDDR2)
imx_anatop_enable_2p5_pulldown(true);
else
imx_anatop_enable_weak2p5(true);
if (cpu_is_imx7d()) {
/* PLL and PFDs overwrite set */
regmap_write(anatop, ANADIG_ARM_PLL + REG_SET, 1 << 20);
regmap_write(anatop, ANADIG_DDR_PLL + REG_SET, 1 << 19);
regmap_write(anatop, ANADIG_SYS_PLL + REG_SET, 0x1ff << 17);
regmap_write(anatop, ANADIG_ENET_PLL + REG_SET, 1 << 13);
regmap_write(anatop, ANADIG_AUDIO_PLL + REG_SET, 1 << 24);
regmap_write(anatop, ANADIG_VIDEO_PLL + REG_SET, 1 << 24);
return;
}
if (cpu_is_imx6q() && imx_get_soc_revision() >= IMX_CHIP_REVISION_2_0)
imx_anatop_disable_pu(true);
imx_anatop_enable_weak2p5(true);
imx_anatop_enable_fet_odrive(true);
if (cpu_is_imx6sl())
if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul() ||
cpu_is_imx6ull() || cpu_is_imx6sll())
imx_anatop_disconnect_high_snvs(true);
}
void imx_anatop_post_resume(void)
{
if (imx_mmdc_get_ddr_type() == IMX_DDR_TYPE_LPDDR2)
imx_anatop_enable_2p5_pulldown(false);
else
imx_anatop_enable_weak2p5(false);
if (cpu_is_imx7d()) {
/* PLL and PFDs overwrite clear */
regmap_write(anatop, ANADIG_ARM_PLL + REG_CLR, 1 << 20);
regmap_write(anatop, ANADIG_DDR_PLL + REG_CLR, 1 << 19);
regmap_write(anatop, ANADIG_SYS_PLL + REG_CLR, 0x1ff << 17);
regmap_write(anatop, ANADIG_ENET_PLL + REG_CLR, 1 << 13);
regmap_write(anatop, ANADIG_AUDIO_PLL + REG_CLR, 1 << 24);
regmap_write(anatop, ANADIG_VIDEO_PLL + REG_CLR, 1 << 24);
return;
}
if (cpu_is_imx6q() && imx_get_soc_revision() >= IMX_CHIP_REVISION_2_0)
imx_anatop_disable_pu(false);
imx_anatop_enable_weak2p5(false);
imx_anatop_enable_fet_odrive(false);
if (cpu_is_imx6sl())
if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul() ||
cpu_is_imx6ull() || cpu_is_imx6sll())
imx_anatop_disconnect_high_snvs(false);
}
static void imx_anatop_usb_chrg_detect_disable(void)
@ -110,10 +190,11 @@ void __init imx_init_revision_from_anatop(void)
{
struct device_node *np;
void __iomem *anatop_base;
void __iomem *src_base;
unsigned int revision;
u32 digprog;
u32 digprog, sbmr2 = 0;
u16 offset = ANADIG_DIGPROG;
u8 major_part, minor_part;
u16 major_part, minor_part;
np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
anatop_base = of_iomap(np, 0);
@ -125,6 +206,20 @@ void __init imx_init_revision_from_anatop(void)
digprog = readl_relaxed(anatop_base + offset);
iounmap(anatop_base);
if ((digprog >> 16) == MXC_CPU_IMX6ULL) {
np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-src");
if (np) {
src_base = of_iomap(np, 0);
WARN_ON(!src_base);
sbmr2 = readl_relaxed(src_base + 0x1c);
iounmap(src_base);
}
if (sbmr2 & (1 << 6)) {
digprog &= ~(0xff << 16);
digprog |= (MXC_CPU_IMX6ULZ << 16);
}
}
/*
* On i.MX7D digprog value match linux version format, so
* it needn't map again and we can use register value directly.
@ -144,24 +239,6 @@ void __init imx_init_revision_from_anatop(void)
major_part = (digprog >> 8) & 0xf;
minor_part = digprog & 0xf;
revision = ((major_part + 1) << 4) | minor_part;
if ((digprog >> 16) == MXC_CPU_IMX6ULL) {
void __iomem *src_base;
u32 sbmr2;
np = of_find_compatible_node(NULL, NULL,
"fsl,imx6ul-src");
src_base = of_iomap(np, 0);
WARN_ON(!src_base);
sbmr2 = readl_relaxed(src_base + SRC_SBMR2);
iounmap(src_base);
/* src_sbmr2 bit 6 is to identify if it is i.MX6ULZ */
if (sbmr2 & (1 << 6)) {
digprog &= ~(0xff << 16);
digprog |= (MXC_CPU_IMX6ULZ << 16);
}
}
}
mxc_set_cpu_type(digprog >> 16 & 0xff);

View File

@ -8,6 +8,7 @@
#define __ASM_ARCH_MXC_COMMON_H__
#include <linux/reboot.h>
#include <soc/imx/src.h>
struct irq_data;
struct platform_device;
@ -89,6 +90,29 @@ void imx_smp_prepare(void);
static inline void imx_scu_map_io(void) {}
static inline void imx_smp_prepare(void) {}
#endif
void imx6sx_set_m4_highfreq(bool high_freq);
void imx_mu_enable_m4_irqs_in_gic(bool enable);
#ifdef CONFIG_HAVE_IMX_GPC
void imx_gpc_add_m4_wake_up_irq(u32 irq, bool enable);
unsigned int imx_gpc_is_m4_sleeping(void);
#else
static inline void imx_gpc_add_m4_wake_up_irq(u32 irq, bool enable) {}
static inline unsigned int imx_gpc_is_m4_sleeping(void) { return 0; }
#endif
#ifdef CONFIG_HAVE_IMX_GPCV2
int imx_gpcv2_mf_power_on(unsigned int irq, unsigned int on);
void imx_gpcv2_set_core1_pdn_pup_by_software(bool pdn);
void imx_gpcv2_add_m4_wake_up_irq(u32 hwirq, bool enable);
#else
static inline int imx_gpcv2_mf_power_on(unsigned int irq, unsigned int on) { return 0; }
static inline void imx_gpcv2_set_core1_pdn_pup_by_software(bool pdn) {}
static inline void imx_gpcv2_add_m4_wake_up_irq(u32 hwirq, bool enable) {}
#endif
void imx_gpc_hold_m4_in_sleep(void);
void imx_gpc_release_m4_in_sleep(void);
bool imx_mu_is_m4_in_low_freq(void);
bool imx_mu_is_m4_in_stop(void);
void imx_mu_set_m4_run_mode(void);
void imx_src_init(void);
void imx_gpc_pre_suspend(bool arm_power_off);
void imx_gpc_post_resume(void);
@ -96,13 +120,20 @@ void imx_gpc_mask_all(void);
void imx_gpc_restore_all(void);
void imx_gpc_hwirq_mask(unsigned int hwirq);
void imx_gpc_hwirq_unmask(unsigned int hwirq);
unsigned int imx_gpc_is_mf_mix_off(void);
void imx_anatop_init(void);
void imx_anatop_pre_suspend(void);
void imx_anatop_post_resume(void);
int imx6_set_lpm(enum mxc_cpu_pwr_mode mode);
void imx6_set_int_mem_clk_lpm(bool enable);
void imx6sl_set_wait_clk(bool enter);
#ifdef CONFIG_HAVE_IMX_MMDC
int imx_mmdc_get_ddr_type(void);
int imx_mmdc_get_lpddr2_2ch_mode(void);
#else
static inline int imx_mmdc_get_ddr_type(void) { return 0; }
static inline int imx_mmdc_get_lpddr2_2ch_mode(void) { return 0; }
#endif
int imx7ulp_set_lpm(enum ulp_cpu_pwr_mode mode);
void imx_busfreq_map_io(void);
void imx7_pm_map_io(void);

View File

@ -14,22 +14,157 @@
#include "common.h"
#include "hardware.h"
#define GPC_CNTR 0x0
#define GPC_CNTR 0x000
#define GPC_CNTR_L2_PGE 22
#define GPC_IMR1 0x008
#define GPC_PGC_MF_PDN 0x220
#define GPC_PGC_CPU_PDN 0x2a0
#define GPC_PGC_CPU_PUPSCR 0x2a4
#define GPC_PGC_CPU_PDNSCR 0x2a8
#define GPC_PGC_SW2ISO_SHIFT 0x8
#define GPC_PGC_SW_SHIFT 0x0
#define GPC_M4_LPSR 0x2c
#define GPC_M4_LPSR_M4_SLEEPING_SHIFT 4
#define GPC_M4_LPSR_M4_SLEEPING_MASK 0x1
#define GPC_M4_LPSR_M4_SLEEP_HOLD_REQ_MASK 0x1
#define GPC_M4_LPSR_M4_SLEEP_HOLD_REQ_SHIFT 0
#define GPC_M4_LPSR_M4_SLEEP_HOLD_ACK_MASK 0x1
#define GPC_M4_LPSR_M4_SLEEP_HOLD_ACK_SHIFT 1
#define GPC_CNTR_L2_PGE_SHIFT 22
#define GPC_PGC_CPU_SW_SHIFT 0
#define GPC_PGC_CPU_SW_MASK 0x3f
#define GPC_PGC_CPU_SW2ISO_SHIFT 8
#define GPC_PGC_CPU_SW2ISO_MASK 0x3f
#define IMR_NUM 4
#define GPC_MAX_IRQS (IMR_NUM * 32)
/* for irq #74 and #75 */
#define GPC_USB_VBUS_WAKEUP_IRQ_MASK 0xc00
/* for irq #150 and #151 */
#define GPC_ENET_WAKEUP_IRQ_MASK 0xC00000
static void __iomem *gpc_base;
static u32 gpc_wake_irqs[IMR_NUM];
static u32 gpc_saved_imrs[IMR_NUM];
static u32 gpc_mf_irqs[IMR_NUM];
static u32 gpc_mf_request_on[IMR_NUM];
static DEFINE_SPINLOCK(gpc_lock);
/* implemented in drivers/soc/imx/gpc.c */
extern void _imx6_pm_pu_power_off(void);
extern void _imx6_pm_pu_power_on(void);
/*
 * Record (or forget) an interrupt the M4 core wants as a wakeup source,
 * so it is kept unmasked in the GPC wakeup bitmap during suspend.
 *
 * @hwirq:  GIC hardware irq number; SPIs start at 32, anything below is
 *          rejected.
 * @enable: true to add the irq to gpc_wake_irqs, false to remove it.
 */
void imx_gpc_add_m4_wake_up_irq(u32 hwirq, bool enable)
{
	unsigned int idx = hwirq / 32;
	unsigned long flags;
	u32 mask;

	/* Sanity check for SPI irq */
	if (hwirq < 32)
		return;

	mask = 1 << hwirq % 32;
	/* gpc_wake_irqs is shared with irq_set_wake; guard with gpc_lock */
	spin_lock_irqsave(&gpc_lock, flags);
	gpc_wake_irqs[idx] = enable ? gpc_wake_irqs[idx] | mask :
		gpc_wake_irqs[idx] & ~mask;
	spin_unlock_irqrestore(&gpc_lock, flags);
}
/*
 * Keep the M4 core held in sleep while the A-core suspends.
 *
 * Waits (up to 500ms) for the M4 to report sleeping, asserts the GPC
 * sleep-hold request by clearing the REQ bit (NOTE(review): polarity
 * looks active-low — confirm against the GPC reference manual), then
 * waits (up to 500ms) for the ACK bit to clear.
 *
 * Fix vs. original: both poll loops previously kept spinning and
 * re-printing pr_err() forever once the deadline passed; they now log
 * once and break, preserving the best-effort continue-on-timeout
 * behaviour without the livelock/log flood.
 */
void imx_gpc_hold_m4_in_sleep(void)
{
	int val;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);

	/* wait M4 in wfi before asserting hold request */
	while (!imx_gpc_is_m4_sleeping())
		if (time_after(jiffies, timeout)) {
			pr_err("M4 is NOT in expected sleep!\n");
			break;
		}

	val = readl_relaxed(gpc_base + GPC_M4_LPSR);
	val &= ~(GPC_M4_LPSR_M4_SLEEP_HOLD_REQ_MASK <<
		GPC_M4_LPSR_M4_SLEEP_HOLD_REQ_SHIFT);
	writel_relaxed(val, gpc_base + GPC_M4_LPSR);

	timeout = jiffies + msecs_to_jiffies(500);
	while (readl_relaxed(gpc_base + GPC_M4_LPSR)
		& (GPC_M4_LPSR_M4_SLEEP_HOLD_ACK_MASK <<
		GPC_M4_LPSR_M4_SLEEP_HOLD_ACK_SHIFT))
		if (time_after(jiffies, timeout)) {
			pr_err("Wait M4 hold ack timeout!\n");
			break;
		}
}
/*
 * Undo imx_gpc_hold_m4_in_sleep(): set the sleep-hold request bit back,
 * releasing the M4 core after resume.
 */
void imx_gpc_release_m4_in_sleep(void)
{
	int val;

	val = readl_relaxed(gpc_base + GPC_M4_LPSR);
	val |= GPC_M4_LPSR_M4_SLEEP_HOLD_REQ_MASK <<
		GPC_M4_LPSR_M4_SLEEP_HOLD_REQ_SHIFT;
	writel_relaxed(val, gpc_base + GPC_M4_LPSR);
}
unsigned int imx_gpc_is_m4_sleeping(void)
{
if (readl_relaxed(gpc_base + GPC_M4_LPSR) &
(GPC_M4_LPSR_M4_SLEEPING_MASK <<
GPC_M4_LPSR_M4_SLEEPING_SHIFT))
return 1;
return 0;
}
/*
 * Tell whether USB vbus wakeup (irq #74/#75, bank 1 of gpc_wake_irqs)
 * is armed on a SoC that supports it (i.MX6SX/UL/ULL/SLL).
 */
bool imx_gpc_usb_wakeup_enabled(void)
{
	bool soc_supported = cpu_is_imx6sx() || cpu_is_imx6ul() ||
			     cpu_is_imx6ull() || cpu_is_imx6sll();

	if (!soc_supported)
		return false;

	/*
	 * for SoC later than i.MX6SX, USB vbus wakeup
	 * only needs weak 2P5 on, stop_mode_config is
	 * NOT needed, so we check if is USB vbus wakeup
	 * is enabled(assume irq #74 and #75) to decide
	 * if to keep weak 2P5 on.
	 */
	return (gpc_wake_irqs[1] & GPC_USB_VBUS_WAKEUP_IRQ_MASK) != 0;
}
/*
 * Tell whether ENET wakeup (irq #150/#151, bank 3 of gpc_wake_irqs)
 * is armed; only meaningful on i.MX6Q.
 */
bool imx_gpc_enet_wakeup_enabled(void)
{
	return cpu_is_imx6q() &&
	       (gpc_wake_irqs[3] & GPC_ENET_WAKEUP_IRQ_MASK) != 0;
}
/*
 * Return the raw GPC_PGC_MF_PDN register value: non-zero means the
 * Mega/Fast mix power-down was requested for the last suspend.
 */
unsigned int imx_gpc_is_mf_mix_off(void)
{
	return readl_relaxed(gpc_base + GPC_PGC_MF_PDN);
}
/*
 * Request power-down of the Mega/Fast mix for the coming suspend, but
 * only if no armed wakeup source (and no explicit mf_request_on user)
 * lives inside the M/F domain — otherwise powering it off would lose
 * the wakeup capability.
 */
static void imx_gpc_mf_mix_off(void)
{
	int i;

	/* bail out if any wakeup/requested irq belongs to the M/F domain */
	for (i = 0; i < IMR_NUM; i++)
		if (((gpc_wake_irqs[i] | gpc_mf_request_on[i]) &
			gpc_mf_irqs[i]) != 0)
			return;

	pr_info("Turn off M/F mix!\n");
	/* turn off mega/fast mix */
	writel_relaxed(0x1, gpc_base + GPC_PGC_MF_PDN);
}
void imx_gpc_set_arm_power_up_timing(u32 sw2iso, u32 sw)
{
@ -53,9 +188,9 @@ void imx_gpc_set_l2_mem_power_in_lpm(bool power_off)
u32 val;
val = readl_relaxed(gpc_base + GPC_CNTR);
val &= ~(1 << GPC_CNTR_L2_PGE_SHIFT);
val &= ~(1 << GPC_CNTR_L2_PGE);
if (power_off)
val |= 1 << GPC_CNTR_L2_PGE_SHIFT;
val |= 1 << GPC_CNTR_L2_PGE;
writel_relaxed(val, gpc_base + GPC_CNTR);
}
@ -64,6 +199,14 @@ void imx_gpc_pre_suspend(bool arm_power_off)
void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
int i;
if (cpu_is_imx6q() && imx_get_soc_revision() >= IMX_CHIP_REVISION_2_0)
_imx6_pm_pu_power_off();
/* power down the mega-fast power domain */
if ((cpu_is_imx6sx() || cpu_is_imx6ul() || cpu_is_imx6ull()
|| cpu_is_imx6sll()) && arm_power_off)
imx_gpc_mf_mix_off();
/* Tell GPC to power off ARM core when suspend */
if (arm_power_off)
imx_gpc_set_arm_power_in_lpm(arm_power_off);
@ -79,8 +222,15 @@ void imx_gpc_post_resume(void)
void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
int i;
if (cpu_is_imx6q() && imx_get_soc_revision() >= IMX_CHIP_REVISION_2_0)
_imx6_pm_pu_power_on();
/* Keep ARM core powered on for other low-power modes */
imx_gpc_set_arm_power_in_lpm(false);
/* Keep M/F mix powered on for other low-power modes */
if (cpu_is_imx6sx() || cpu_is_imx6ul() || cpu_is_imx6ull()
|| cpu_is_imx6sll())
writel_relaxed(0x0, gpc_base + GPC_PGC_MF_PDN);
for (i = 0; i < IMR_NUM; i++)
writel_relaxed(gpc_saved_imrs[i], reg_imr1 + i * 4);
@ -89,11 +239,14 @@ void imx_gpc_post_resume(void)
static int imx_gpc_irq_set_wake(struct irq_data *d, unsigned int on)
{
unsigned int idx = d->hwirq / 32;
unsigned long flags;
u32 mask;
mask = 1 << d->hwirq % 32;
spin_lock_irqsave(&gpc_lock, flags);
gpc_wake_irqs[idx] = on ? gpc_wake_irqs[idx] | mask :
gpc_wake_irqs[idx] & ~mask;
spin_unlock_irqrestore(&gpc_lock, flags);
/*
* Do *not* call into the parent, as the GIC doesn't have any
@ -225,11 +378,78 @@ static const struct irq_domain_ops imx_gpc_domain_ops = {
.free = irq_domain_free_irqs_common,
};
/*
 * Register (or drop) a request to keep the Mega/Fast mix powered during
 * suspend on behalf of the given linux irq.  Consulted by
 * imx_gpc_mf_mix_off() before it powers the domain down.
 *
 * @irq: linux irq number (translated to hwirq via its descriptor)
 * @on:  non-zero to pin the domain on, zero to release the request
 * Returns 0 always.
 */
int imx_gpc_mf_power_on(unsigned int irq, unsigned int on)
{
	struct irq_desc *d = irq_to_desc(irq);
	unsigned int idx = d->irq_data.hwirq / 32;
	unsigned long flags;
	u32 mask;

	mask = 1 << (d->irq_data.hwirq % 32);
	spin_lock_irqsave(&gpc_lock, flags);
	gpc_mf_request_on[idx] = on ? gpc_mf_request_on[idx] | mask :
				  gpc_mf_request_on[idx] & ~mask;
	spin_unlock_irqrestore(&gpc_lock, flags);

	return 0;
}
/*
 * SoC-dispatching wrapper for M/F-mix power requests: routes to the
 * GPCv2 implementation on i.MX7D, the GPCv1 one on i.MX6SX/UL/ULL/SLL,
 * and is a no-op (returns 0) elsewhere.
 */
int imx_gpc_mf_request_on(unsigned int irq, unsigned int on)
{
	if (cpu_is_imx7d())
		return imx_gpcv2_mf_power_on(irq, on);

	if (cpu_is_imx6sx() || cpu_is_imx6ul() || cpu_is_imx6ull() ||
	    cpu_is_imx6sll())
		return imx_gpc_mf_power_on(irq, on);

	return 0;
}
EXPORT_SYMBOL_GPL(imx_gpc_mf_request_on);
/*
 * Rescale the CPU power-up isolation counters (PUPSCR sw/sw2iso) when
 * the counter clock is switched between IPG/2048 and IPG/32 for low
 * power idle.
 *
 * @flag: true  -> save the boot-time values (static locals) and program
 *                 rescaled ones plus bit 5, which selects the IPG/32
 *                 counter clock (i.MX6UL TO1.1+).
 *        false -> restore the saved boot-time values.
 *
 * NOTE(review): the saved values are only valid after a flag=true call;
 * callers must pair true/false invocations.
 */
void imx_gpc_switch_pupscr_clk(bool flag)
{
	static u32 pupscr_sw2iso, pupscr_sw;
	u32 ratio, pupscr = readl_relaxed(gpc_base + GPC_PGC_CPU_PUPSCR);

	if (flag) {
		/* save the init clock setting IPG/2048 for IPG@66Mhz */
		pupscr_sw2iso = (pupscr >> GPC_PGC_CPU_SW2ISO_SHIFT) &
				GPC_PGC_CPU_SW2ISO_MASK;
		pupscr_sw = (pupscr >> GPC_PGC_CPU_SW_SHIFT) &
			    GPC_PGC_CPU_SW_MASK;
		/*
		 * i.MX6UL TO1.0 ARM power up uses IPG/2048 as clock source,
		 * from TO1.1, PGC_CPU_PUPSCR bit [5] is re-defined to switch
		 * clock to IPG/32, enable this bit to speed up the ARM power
		 * up process in low power idle case(IPG@1.5Mhz). So the sw and
		 * sw2iso need to be adjusted as below:
		 * sw_new(sw2iso_new) = (2048 * 1.5 / 66 * 32) * sw(sw2iso)
		 */
		ratio = 3072 / (66 * 32);
		pupscr &= ~(GPC_PGC_CPU_SW_MASK << GPC_PGC_CPU_SW_SHIFT |
			  GPC_PGC_CPU_SW2ISO_MASK << GPC_PGC_CPU_SW2ISO_SHIFT);
		/* +1 rounds up so the counters never shrink to zero */
		pupscr |= (ratio * pupscr_sw + 1) << GPC_PGC_CPU_SW_SHIFT |
			  1 << 5 | (ratio * pupscr_sw2iso + 1) <<
			  GPC_PGC_CPU_SW2ISO_SHIFT;
		writel_relaxed(pupscr, gpc_base + GPC_PGC_CPU_PUPSCR);
	} else {
		/* restore back after exit from low power idle */
		pupscr &= ~(GPC_PGC_CPU_SW_MASK << GPC_PGC_CPU_SW_SHIFT |
			  GPC_PGC_CPU_SW2ISO_MASK << GPC_PGC_CPU_SW2ISO_SHIFT);
		pupscr |= pupscr_sw << GPC_PGC_CPU_SW_SHIFT |
			  pupscr_sw2iso << GPC_PGC_CPU_SW2ISO_SHIFT;
		writel_relaxed(pupscr, gpc_base + GPC_PGC_CPU_PUPSCR);
	}
}
static int __init imx_gpc_init(struct device_node *node,
struct device_node *parent)
{
struct irq_domain *parent_domain, *domain;
int i;
u32 val;
u32 cpu_pupscr_sw2iso, cpu_pupscr_sw;
u32 cpu_pdnscr_iso2sw, cpu_pdnscr_iso;
if (!parent) {
pr_err("%pOF: no parent, giving up\n", node);
@ -258,12 +478,70 @@ static int __init imx_gpc_init(struct device_node *node,
for (i = 0; i < IMR_NUM; i++)
writel_relaxed(~0, gpc_base + GPC_IMR1 + i * 4);
/* Read supported wakeup source in M/F domain */
if (cpu_is_imx6sx() || cpu_is_imx6ul() || cpu_is_imx6ull()
|| cpu_is_imx6sll()) {
of_property_read_u32_index(node, "fsl,mf-mix-wakeup-irq", 0,
&gpc_mf_irqs[0]);
of_property_read_u32_index(node, "fsl,mf-mix-wakeup-irq", 1,
&gpc_mf_irqs[1]);
of_property_read_u32_index(node, "fsl,mf-mix-wakeup-irq", 2,
&gpc_mf_irqs[2]);
of_property_read_u32_index(node, "fsl,mf-mix-wakeup-irq", 3,
&gpc_mf_irqs[3]);
if (!(gpc_mf_irqs[0] | gpc_mf_irqs[1] |
gpc_mf_irqs[2] | gpc_mf_irqs[3]))
pr_info("No wakeup source in Mega/Fast domain found!\n");
}
/* clear the L2_PGE bit on i.MX6SLL */
if (cpu_is_imx6sll()) {
val = readl_relaxed(gpc_base + GPC_CNTR);
val &= ~(1 << GPC_CNTR_L2_PGE);
writel_relaxed(val, gpc_base + GPC_CNTR);
}
/*
* Clear the OF_POPULATED flag set in of_irq_init so that
* later the GPC power domain driver will not be skipped.
*/
of_node_clear_flag(node, OF_POPULATED);
/*
* If there are CPU isolation timing settings in dts,
* update them according to dts, otherwise, keep them
* with default value in registers.
*/
cpu_pupscr_sw2iso = cpu_pupscr_sw =
cpu_pdnscr_iso2sw = cpu_pdnscr_iso = 0;
/* Read CPU isolation setting for GPC */
of_property_read_u32(node, "fsl,cpu_pupscr_sw2iso", &cpu_pupscr_sw2iso);
of_property_read_u32(node, "fsl,cpu_pupscr_sw", &cpu_pupscr_sw);
of_property_read_u32(node, "fsl,cpu_pdnscr_iso2sw", &cpu_pdnscr_iso2sw);
of_property_read_u32(node, "fsl,cpu_pdnscr_iso", &cpu_pdnscr_iso);
/* Return if no property found in dtb */
if ((cpu_pupscr_sw2iso | cpu_pupscr_sw
| cpu_pdnscr_iso2sw | cpu_pdnscr_iso) == 0)
return 0;
/* Update CPU PUPSCR timing if it is defined in dts */
val = readl_relaxed(gpc_base + GPC_PGC_CPU_PUPSCR);
val &= ~(GPC_PGC_CPU_SW2ISO_MASK << GPC_PGC_CPU_SW2ISO_SHIFT);
val &= ~(GPC_PGC_CPU_SW_MASK << GPC_PGC_CPU_SW_SHIFT);
val |= cpu_pupscr_sw2iso << GPC_PGC_CPU_SW2ISO_SHIFT;
val |= cpu_pupscr_sw << GPC_PGC_CPU_SW_SHIFT;
writel_relaxed(val, gpc_base + GPC_PGC_CPU_PUPSCR);
/* Update CPU PDNSCR timing if it is defined in dts */
val = readl_relaxed(gpc_base + GPC_PGC_CPU_PDNSCR);
val &= ~(GPC_PGC_CPU_SW2ISO_MASK << GPC_PGC_CPU_SW2ISO_SHIFT);
val &= ~(GPC_PGC_CPU_SW_MASK << GPC_PGC_CPU_SW_SHIFT);
val |= cpu_pdnscr_iso2sw << GPC_PGC_CPU_SW2ISO_SHIFT;
val |= cpu_pdnscr_iso << GPC_PGC_CPU_SW_SHIFT;
writel_relaxed(val, gpc_base + GPC_PGC_CPU_PDNSCR);
return 0;
}
IRQCHIP_DECLARE(imx_gpc, "fsl,imx6q-gpc", imx_gpc_init);

View File

@ -59,6 +59,7 @@
#define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)
static int ddr_type;
static int lpddr2_2ch_mode;
struct fsl_mmdc_devtype_data {
unsigned int flags;
@ -575,6 +576,11 @@ int imx_mmdc_get_ddr_type(void)
return ddr_type;
}
/*
 * Return the cached LPDDR2 channel mode detected at MMDC probe time
 * (IMX_LPDDR2_1CH_MODE or IMX_LPDDR2_2CH_MODE).
 */
int imx_mmdc_get_lpddr2_2ch_mode(void)
{
	return lpddr2_2ch_mode;
}
static struct platform_driver imx_mmdc_driver = {
.driver = {
.name = "imx-mmdc",

View File

@ -0,0 +1,434 @@
/*
* Copyright (C) 2014-2015 Freescale Semiconductor, Inc.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/busfreq-imx.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include "common.h"
#include "hardware.h"
#define MU_ATR0_OFFSET 0x0
#define MU_ARR0_OFFSET 0x10
#define MU_ARR1_OFFSET 0x14
#define MU_ASR 0x20
#define MU_ACR 0x24
#define MX7ULP_MU_TR0 0x20
#define MX7ULP_MU_RR0 0x40
#define MX7ULP_MU_RR1 0x44
#define MX7ULP_MU_SR 0x60
#define MX7ULP_MU_CR 0x64
#define MU_LPM_HANDSHAKE_INDEX 0
#define MU_RPMSG_HANDSHAKE_INDEX 1
#define MU_LPM_BUS_HIGH_READY_FOR_M4 0xFFFF6666
#define MU_LPM_M4_FREQ_CHANGE_READY 0xFFFF7777
#define MU_LPM_M4_REQUEST_HIGH_BUS 0x2222CCCC
#define MU_LPM_M4_RELEASE_HIGH_BUS 0x2222BBBB
#define MU_LPM_M4_WAKEUP_SRC_VAL 0x55555000
#define MU_LPM_M4_WAKEUP_SRC_MASK 0xFFFFF000
#define MU_LPM_M4_WAKEUP_IRQ_MASK 0xFF0
#define MU_LPM_M4_WAKEUP_IRQ_SHIFT 0x4
#define MU_LPM_M4_WAKEUP_ENABLE_MASK 0xF
#define MU_LPM_M4_WAKEUP_ENABLE_SHIFT 0x0
#define MU_LPM_M4_RUN_MODE 0x5A5A0001
#define MU_LPM_M4_WAIT_MODE 0x5A5A0002
#define MU_LPM_M4_STOP_MODE 0x5A5A0003
#define MAX_NUM 10 /* enlarge it if overflow happen */
static void __iomem *mu_base;
static u32 m4_message[MAX_NUM];
static u32 in_idx, out_idx;
static struct delayed_work mu_work;
static u32 m4_wake_irqs[4];
static bool m4_freq_low;
struct irq_domain *domain;
static bool m4_in_stop;
static struct clk *clk;
static DEFINE_SPINLOCK(mu_lock);
/* Mark the M4 core as back in run mode (clears the cached stop flag). */
void imx_mu_set_m4_run_mode(void)
{
	m4_in_stop = false;
}
/* Report the cached M4 state: true if its last LPM message was STOP mode. */
bool imx_mu_is_m4_in_stop(void)
{
	return m4_in_stop;
}
/* Report whether the M4 has released the high bus frequency request. */
bool imx_mu_is_m4_in_low_freq(void)
{
	return m4_freq_low;
}
/*
 * Enable or disable, in the GIC, every interrupt the M4 registered as a
 * wakeup source (tracked per-bank in m4_wake_irqs by mu_work_handler).
 * Used around suspend so M4 wakeup irqs reach the A-core only when wanted.
 */
void imx_mu_enable_m4_irqs_in_gic(bool enable)
{
	int i, j;

	/* scan all four 32-bit banks of registered M4 wakeup irqs */
	for (i = 0; i < 4; i++) {
		if (m4_wake_irqs[i] == 0)
			continue;
		for (j = 0; j < 32; j++) {
			if (m4_wake_irqs[i] & (1 << j)) {
				/* hwirq = bank * 32 + bit; map back to virq */
				if (enable)
					enable_irq(irq_find_mapping(
						domain, i * 32 + j));
				else
					disable_irq(irq_find_mapping(
						domain, i * 32 + j));
			}
		}
	}
}
/*
 * Placeholder handler for irqs owned by the M4: claiming them on the
 * A-core makes them wakeup-capable; no A-core side work is needed.
 */
static irqreturn_t mcc_m4_dummy_isr(int irq, void *param)
{
	return IRQ_HANDLED;
}
/*
 * Post one 32-bit message to the M4 through MU transmit register @index.
 *
 * Waits (500ms timeout) until the channel's TEn (transmit-empty) status
 * bit is set and no general event flag (bit 4) is pending, writes the
 * message, then double-checks that TEn actually dropped — working around
 * hardware where the flag updates a little after the register is filled.
 *
 * Returns 0 on success (including the late-TEn case), -EIO if the
 * transmit buffer never drained.
 *
 * Fix vs. original: the final pr_info() split its string literal as
 * "immediately""when", printing "immediatelywhen"; the missing space is
 * restored.
 */
static int imx_mu_send_message(unsigned int index, unsigned int data)
{
	u32 val, ep;
	int i, te_flag = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);

	/* wait for transfer buffer empty, and no event pending */
	do {
		if (cpu_is_imx7ulp())
			val = readl_relaxed(mu_base + MX7ULP_MU_SR);
		else
			val = readl_relaxed(mu_base + MU_ASR);
		ep = val & BIT(4);
		if (time_after(jiffies, timeout)) {
			pr_err("Waiting MU transmit buffer empty timeout!\n");
			return -EIO;
		}
	} while (((val & (1 << (20 + 3 - index))) == 0) || (ep == BIT(4)));

	if (cpu_is_imx7ulp())
		writel_relaxed(data, mu_base + index * 0x4 + MX7ULP_MU_TR0);
	else
		writel_relaxed(data, mu_base + index * 0x4 + MU_ATR0_OFFSET);

	/*
	 * make a double check that TEn is not empty after write
	 */
	if (cpu_is_imx7ulp())
		val = readl_relaxed(mu_base + MX7ULP_MU_SR);
	else
		val = readl_relaxed(mu_base + MU_ASR);
	ep = val & BIT(4);
	if (((val & (1 << (20 + (3 - index)))) == 0) || (ep == BIT(4)))
		return 0;
	else
		te_flag = 1;

	/*
	 * Make sure that TEn flag is changed, after the ATRn is filled up.
	 * NOTE(review): 'timeout' is reused from the first wait above, so
	 * this poll can expire immediately — confirm whether a fresh
	 * deadline was intended.
	 */
	for (i = 0; i < 100; i++) {
		if (cpu_is_imx7ulp())
			val = readl_relaxed(mu_base + MX7ULP_MU_SR);
		else
			val = readl_relaxed(mu_base + MU_ASR);
		ep = val & BIT(4);
		if (((val & (1 << (20 + 3 - index))) == 0) || (ep == BIT(4))) {
			/*
			 * BUG here. TEn flag is changes, after the
			 * ATRn is filled with MSG for a while.
			 */
			te_flag = 0;
			break;
		} else if (time_after(jiffies, timeout)) {
			/* Can't see TEn 1->0, maybe already handled! */
			te_flag = 1;
			break;
		}
	}
	if (te_flag == 0)
		pr_info("BUG: TEn is not changed immediately when ATRn is filled up.\n");

	return 0;
}
/*
 * Deferred-work consumer for messages queued by imx_mu_isr().
 *
 * Pops one message from the m4_message ring (under mu_lock), acts on it:
 *  - RUN/WAIT/STOP mode announcements update the cached m4_in_stop flag;
 *  - high-bus request/release toggles the busfreq request (and, on
 *    i.MX6SX, the M4 clock rate) and hands a handshake back to the M4;
 *  - wakeup-source messages (MU_LPM_M4_WAKEUP_SRC_VAL pattern) decode an
 *    irq number + enable flag, map it through the GPC OF node, claim a
 *    dummy handler so the irq becomes wakeup-capable, and record it in
 *    m4_wake_irqs / the GPC wakeup bitmap.
 * Finally the slot is cleared, out_idx advanced, and the RIE3 receive
 * interrupt re-enabled (it was masked in the ISR to throttle the M4).
 */
static void mu_work_handler(struct work_struct *work)
{
	int ret;
	u32 irq, enable, idx, mask, virq;
	struct of_phandle_args args;
	u32 message;
	unsigned long flags;

	/* snapshot the current slot; ring indices are guarded by mu_lock */
	spin_lock_irqsave(&mu_lock, flags);
	message = m4_message[out_idx % MAX_NUM];
	spin_unlock_irqrestore(&mu_lock, flags);

	pr_debug("receive M4 message 0x%x\n", message);

	switch (message) {
	case MU_LPM_M4_RUN_MODE:
	case MU_LPM_M4_WAIT_MODE:
		m4_in_stop = false;
		break;
	case MU_LPM_M4_STOP_MODE:
		m4_in_stop = true;
		break;
	case MU_LPM_M4_REQUEST_HIGH_BUS:
		request_bus_freq(BUS_FREQ_HIGH);
#ifdef CONFIG_SOC_IMX6SX
		if (cpu_is_imx6sx())
			imx6sx_set_m4_highfreq(true);
#endif
		/* ack the M4 so it may start using the fast bus */
		imx_mu_send_message(MU_LPM_HANDSHAKE_INDEX,
			MU_LPM_BUS_HIGH_READY_FOR_M4);
		m4_freq_low = false;
		break;
	case MU_LPM_M4_RELEASE_HIGH_BUS:
		release_bus_freq(BUS_FREQ_HIGH);
#ifdef CONFIG_SOC_IMX6SX
		if (cpu_is_imx6sx()) {
			imx6sx_set_m4_highfreq(false);
			imx_mu_send_message(MU_LPM_HANDSHAKE_INDEX,
				MU_LPM_M4_FREQ_CHANGE_READY);
		}
#endif
		m4_freq_low = true;
		break;
	default:
		if ((message & MU_LPM_M4_WAKEUP_SRC_MASK) ==
			MU_LPM_M4_WAKEUP_SRC_VAL) {
			/* decode irq number and enable flag from payload */
			irq = (message & MU_LPM_M4_WAKEUP_IRQ_MASK) >>
				MU_LPM_M4_WAKEUP_IRQ_SHIFT;
			enable = (message & MU_LPM_M4_WAKEUP_ENABLE_MASK) >>
				MU_LPM_M4_WAKEUP_ENABLE_SHIFT;
			/* to hwirq start from 0 */
			irq -= 32;

			idx = irq / 32;
			mask = 1 << irq % 32;

			/* map the hwirq through the GPC irq domain */
			args.np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-gpc");
			args.args_count = 3;
			args.args[0] = 0;
			args.args[1] = irq;
			args.args[2] = IRQ_TYPE_LEVEL_HIGH;
			virq = irq_create_of_mapping(&args);

			if (enable && can_request_irq(virq, 0)) {
				/* claim it (disabled) so it can wake us */
				ret = request_irq(virq, mcc_m4_dummy_isr,
					IRQF_NO_SUSPEND, "imx-m4-dummy", NULL);
				if (ret) {
					pr_err("%s: register interrupt %d failed, rc %d\n",
						__func__, virq, ret);
					break;
				}
				disable_irq(virq);
				m4_wake_irqs[idx] = m4_wake_irqs[idx] | mask;
			}
			imx_gpc_add_m4_wake_up_irq(irq, enable);
		}
		break;
	}
	/* release the ring slot */
	spin_lock_irqsave(&mu_lock, flags);
	m4_message[out_idx % MAX_NUM] = 0;
	out_idx++;
	spin_unlock_irqrestore(&mu_lock, flags);

	/* enable RIE3 interrupt */
	if (cpu_is_imx7ulp())
		writel_relaxed(readl_relaxed(mu_base + MX7ULP_MU_CR) | BIT(27),
			mu_base + MX7ULP_MU_CR);
	else
		writel_relaxed(readl_relaxed(mu_base + MU_ACR) | BIT(27),
			mu_base + MU_ACR);
}
/*
 * Advertise (or withdraw) A-core low-power-mode readiness to the M4 by
 * toggling bit 0 of the MU control register.  Always returns 0.
 */
int imx_mu_lpm_ready(bool ready)
{
	void __iomem *cr = mu_base +
		(cpu_is_imx7ulp() ? MX7ULP_MU_CR : MU_ACR);
	u32 val = readl_relaxed(cr);

	if (ready)
		val |= BIT(0);
	else
		val &= ~BIT(0);
	writel_relaxed(val, cr);

	return 0;
}
/*
 * MU interrupt handler: on a receive event (RF3, status bit 27) it pulls
 * the message into the m4_message ring, masks RIE3 so the M4 cannot
 * flood us before the work handler runs, and schedules mu_work_handler.
 * Ring overflow (in_idx catching out_idx) is logged and the message
 * effectively dropped.
 */
static irqreturn_t imx_mu_isr(int irq, void *param)
{
	u32 irqs;
	unsigned long flags;

	if (cpu_is_imx7ulp())
		irqs = readl_relaxed(mu_base + MX7ULP_MU_SR);
	else
		irqs = readl_relaxed(mu_base + MU_ASR);

	/* bit 27: receive register 3 full */
	if (irqs & (1 << 27)) {
		spin_lock_irqsave(&mu_lock, flags);
		/* get message from receive buffer */
		if (cpu_is_imx7ulp())
			m4_message[in_idx % MAX_NUM] = readl_relaxed(mu_base +
				MX7ULP_MU_RR0);
		else
			m4_message[in_idx % MAX_NUM] = readl_relaxed(mu_base +
				MU_ARR0_OFFSET);
		/* disable RIE3 interrupt */
		if (cpu_is_imx7ulp())
			writel_relaxed(readl_relaxed(mu_base + MX7ULP_MU_CR)
				& (~BIT(27)), mu_base + MX7ULP_MU_CR);
		else
			writel_relaxed(readl_relaxed(mu_base + MU_ACR)
				& (~BIT(27)), mu_base + MU_ACR);
		in_idx++;
		if (in_idx == out_idx) {
			spin_unlock_irqrestore(&mu_lock, flags);
			pr_err("MU overflow!\n");
			return IRQ_HANDLED;
		}
		spin_unlock_irqrestore(&mu_lock, flags);

		schedule_delayed_work(&mu_work, 0);
	}

	return IRQ_HANDLED;
}
static int imx_mu_probe(struct platform_device *pdev)
{
int ret;
u32 irq;
struct device_node *np;
struct device *dev = &pdev->dev;
np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-mu");
mu_base = of_iomap(np, 0);
WARN_ON(!mu_base);
ret = of_device_is_compatible(np, "fsl,imx7ulp-mu");
if (ret)
irq = platform_get_irq(pdev, 1);
else
irq = platform_get_irq(pdev, 0);
ret = request_irq(irq, imx_mu_isr,
IRQF_EARLY_RESUME | IRQF_SHARED, "imx-mu", dev);
if (ret) {
pr_err("%s: register interrupt %d failed, rc %d\n",
__func__, irq, ret);
return ret;
}
ret = of_device_is_compatible(np, "fsl,imx7d-mu");
if (ret) {
clk = devm_clk_get(&pdev->dev, "mu");
if (IS_ERR(clk)) {
dev_err(&pdev->dev,
"mu clock source missing or invalid\n");
return PTR_ERR(clk);
} else {
ret = clk_prepare_enable(clk);
if (ret) {
dev_err(&pdev->dev,
"unable to enable mu clock\n");
return ret;
}
}
/* MU always as a wakeup source for low power mode */
imx_gpcv2_add_m4_wake_up_irq(irq_to_desc(irq)->irq_data.hwirq,
true);
} else {
/* MU always as a wakeup source for low power mode */
imx_gpc_add_m4_wake_up_irq(irq_to_desc(irq)->irq_data.hwirq, true);
}
INIT_DELAYED_WORK(&mu_work, mu_work_handler);
/* bit0 of MX7ULP_MU_CR used to let m4 to know MU is ready now */
if (cpu_is_imx7ulp())
writel_relaxed(readl_relaxed(mu_base + MX7ULP_MU_CR) |
BIT(0) | BIT(26) | BIT(27), mu_base + MX7ULP_MU_CR);
else
writel_relaxed(readl_relaxed(mu_base + MU_ACR) |
BIT(26) | BIT(27), mu_base + MU_ACR);
pr_info("MU is ready for cross core communication!\n");
return 0;
}
static const struct of_device_id imx_mu_ids[] = {
{ .compatible = "fsl,imx6sx-mu" },
{ .compatible = "fsl,imx7d-mu" },
{ .compatible = "fsl,imx7ulp-mu" },
{ }
};
#ifdef CONFIG_PM_SLEEP
/* Nothing to save: MU state is reprogrammed in mu_resume() when needed. */
static int mu_suspend(struct device *dev)
{
	return 0;
}
/*
 * On i.MX7ULP the MU control register is lost across suspend, so restore
 * the ready flag (bit 0) and receive-interrupt enables (bits 26/27).
 * Other SoCs retain the register and need nothing.
 */
static int mu_resume(struct device *dev)
{
	if (!cpu_is_imx7ulp())
		return 0;

	writel_relaxed(readl_relaxed(mu_base + MX7ULP_MU_CR) |
		BIT(0) | BIT(26) | BIT(27), mu_base + MX7ULP_MU_CR);

	return 0;
}
#endif
/* Late suspend/resume hooks (only active when CONFIG_PM_SLEEP is set). */
static const struct dev_pm_ops mu_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(mu_suspend, mu_resume)
};
/*
 * Platform driver glue for the MU.  The explicit '.owner = THIS_MODULE'
 * assignment is dropped: platform_driver_register() sets .owner itself,
 * so the line was redundant.
 */
static struct platform_driver imx_mu_driver = {
	.driver = {
		.name = "imx-mu",
		.pm = &mu_pm_ops,
		.of_match_table = imx_mu_ids,
	},
	.probe = imx_mu_probe,
};
/* Register the MU platform driver early (subsys_initcall). */
static int __init imx_mu_init(void)
{
	return platform_driver_register(&imx_mu_driver);
}
subsys_initcall(imx_mu_init);

View File

@ -38,6 +38,9 @@
#define IMX_DDR_TYPE_LPDDR3 2
#define IMX_MMDC_DDR_TYPE_LPDDR3 3
#define IMX_LPDDR2_1CH_MODE 0
#define IMX_LPDDR2_2CH_MODE 1
#ifndef __ASSEMBLY__
extern unsigned int __mxc_cpu_type;
@ -105,6 +108,11 @@ static inline bool cpu_is_imx7d(void)
return __mxc_cpu_type == MXC_CPU_IMX7D;
}
/* True when the detected SoC type is i.MX7ULP. */
static inline bool cpu_is_imx7ulp(void)
{
	return __mxc_cpu_type == MXC_CPU_IMX7ULP;
}
struct cpu_op {
u32 cpu_rate;
};

File diff suppressed because it is too large Load Diff

View File

@ -26,6 +26,7 @@
static void __iomem *src_base;
static DEFINE_SPINLOCK(scr_lock);
static bool m4_is_enabled;
static const int sw_reset_bits[5] = {
BP_SRC_SCR_SW_GPU_RST,
@ -35,6 +36,11 @@ static const int sw_reset_bits[5] = {
BP_SRC_SCR_SW_IPU2_RST
};
/* Return the cached m4_is_enabled flag (set elsewhere in this file). */
bool imx_src_is_m4_enabled(void)
{
return m4_is_enabled;
}
static int imx_src_reset_module(struct reset_controller_dev *rcdev,
unsigned long sw_reset_idx)
{

View File

@ -41,23 +41,32 @@
#define PM_INFO_RESUME_ADDR_OFFSET 0x4
#define PM_INFO_DDR_TYPE_OFFSET 0x8
#define PM_INFO_PM_INFO_SIZE_OFFSET 0xC
#define PM_INFO_MX6Q_MMDC_P_OFFSET 0x10
#define PM_INFO_MX6Q_MMDC_V_OFFSET 0x14
#define PM_INFO_MX6Q_SRC_P_OFFSET 0x18
#define PM_INFO_MX6Q_SRC_V_OFFSET 0x1C
#define PM_INFO_MX6Q_IOMUXC_P_OFFSET 0x20
#define PM_INFO_MX6Q_IOMUXC_V_OFFSET 0x24
#define PM_INFO_MX6Q_CCM_P_OFFSET 0x28
#define PM_INFO_MX6Q_CCM_V_OFFSET 0x2C
#define PM_INFO_MX6Q_GPC_P_OFFSET 0x30
#define PM_INFO_MX6Q_GPC_V_OFFSET 0x34
#define PM_INFO_MX6Q_L2_P_OFFSET 0x38
#define PM_INFO_MX6Q_L2_V_OFFSET 0x3C
#define PM_INFO_MMDC_IO_NUM_OFFSET 0x40
#define PM_INFO_MMDC_IO_VAL_OFFSET 0x44
#define PM_INFO_MX6Q_MMDC0_P_OFFSET 0x10
#define PM_INFO_MX6Q_MMDC0_V_OFFSET 0x14
#define PM_INFO_MX6Q_MMDC1_P_OFFSET 0x18
#define PM_INFO_MX6Q_MMDC1_V_OFFSET 0x1C
#define PM_INFO_MX6Q_SRC_P_OFFSET 0x20
#define PM_INFO_MX6Q_SRC_V_OFFSET 0x24
#define PM_INFO_MX6Q_IOMUXC_P_OFFSET 0x28
#define PM_INFO_MX6Q_IOMUXC_V_OFFSET 0x2C
#define PM_INFO_MX6Q_CCM_P_OFFSET 0x30
#define PM_INFO_MX6Q_CCM_V_OFFSET 0x34
#define PM_INFO_MX6Q_GPC_P_OFFSET 0x38
#define PM_INFO_MX6Q_GPC_V_OFFSET 0x3C
#define PM_INFO_MX6Q_L2_P_OFFSET 0x40
#define PM_INFO_MX6Q_L2_V_OFFSET 0x44
#define PM_INFO_MX6Q_ANATOP_P_OFFSET 0x48
#define PM_INFO_MX6Q_ANATOP_V_OFFSET 0x4C
#define PM_INFO_MX6Q_TTBR1_V_OFFSET 0x50
#define PM_INFO_MMDC_IO_NUM_OFFSET 0x54
#define PM_INFO_MMDC_IO_VAL_OFFSET 0x58
/* below offsets depends on MX6_MAX_MMDC_IO_NUM(36) definition */
#define PM_INFO_MMDC_NUM_OFFSET 0x208
#define PM_INFO_MMDC_VAL_OFFSET 0x20C
#define MX6Q_SRC_GPR1 0x20
#define MX6Q_SRC_GPR2 0x24
#define MX6Q_MMDC_MISC 0x18
#define MX6Q_MMDC_MAPSR 0x404
#define MX6Q_MMDC_MPDGCTRL0 0x83c
#define MX6Q_GPC_IMR1 0x08
@ -65,9 +74,49 @@
#define MX6Q_GPC_IMR3 0x10
#define MX6Q_GPC_IMR4 0x14
#define MX6Q_CCM_CCR 0x0
#define MX6Q_ANATOP_CORE 0x140
.align 3
/*
 * Check if the cpu is cortex-a7: sets the Z flag (so a following "beq"
 * is taken) when running on a Cortex-A7.  Clobbers r5 and r6.
 */
.macro is_cortex_a7
/* Read MIDR (p15 c0,c0,0); part number is MIDR[15:4], 0xC07 = Cortex-A7 */
mrc p15, 0, r5, c0, c0, 0
ldr r6, =0xfff0
and r5, r5, r6
ldr r6, =0xc070
cmp r5, r6
.endm
/*
 * Flush the L1 data cache, clear SCTLR.C (d-cache enable), then flush
 * once more so lines dirtied while the cache was still on are also
 * written back.  Clobbers r7 (and whatever v7_flush_dcache_all uses,
 * which is why r0-r10/lr are saved around each call).
 */
.macro disable_l1_cache
/*
 * Flush all data from the L1 data cache before disabling
 * SCTLR.C bit.
 */
push {r0 - r10, lr}
/* manual call: mov lr,pc / mov pc,rN (valid in ARM state) */
ldr r7, =v7_flush_dcache_all
mov lr, pc
mov pc, r7
pop {r0 - r10, lr}
/* disable d-cache: clear SCTLR bit 2 (the C bit) */
mrc p15, 0, r7, c1, c0, 0
bic r7, r7, #(1 << 2)
mcr p15, 0, r7, c1, c0, 0
dsb
isb
/* flush again after SCTLR.C is clear */
push {r0 -r10, lr}
ldr r7, = v7_flush_dcache_all
mov lr, pc
mov pc , r7
pop {r0 -r10, lr}
.endm
.macro sync_l2_cache
/* sync L2 cache to drain L2's buffers to DRAM. */
@ -86,29 +135,8 @@
.endm
.macro resume_mmdc
/* restore MMDC IO */
cmp r5, #0x0
ldreq r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
ldrne r11, [r0, #PM_INFO_MX6Q_IOMUXC_P_OFFSET]
ldr r6, [r0, #PM_INFO_MMDC_IO_NUM_OFFSET]
ldr r7, =PM_INFO_MMDC_IO_VAL_OFFSET
add r7, r7, r0
1:
ldr r8, [r7], #0x4
ldr r9, [r7], #0x4
str r9, [r11, r8]
subs r6, r6, #0x1
bne 1b
cmp r5, #0x0
ldreq r11, [r0, #PM_INFO_MX6Q_MMDC_V_OFFSET]
ldrne r11, [r0, #PM_INFO_MX6Q_MMDC_P_OFFSET]
cmp r3, #IMX_DDR_TYPE_LPDDR2
bne 4f
/* r11 must be MMDC base address */
.macro reset_read_fifo
/* reset read FIFO, RST_RD_FIFO */
ldr r7, =MX6Q_MMDC_MPDGCTRL0
@ -128,23 +156,294 @@
ldr r6, [r11, r7]
ands r6, r6, #(1 << 31)
bne 3b
/* check if lppdr2 2 channel mode is enabled */
ldr r7, =MX6Q_MMDC_MISC
ldr r6, [r11, r7]
ands r6, r6, #(1 << 2)
beq 6f
ldr r7, =MX6Q_MMDC_MPDGCTRL0
ldr r6, [r12, r7]
orr r6, r6, #(1 << 31)
str r6, [r12, r7]
4:
ldr r6, [r12, r7]
ands r6, r6, #(1 << 31)
bne 4b
ldr r6, [r12, r7]
orr r6, r6, #(1 << 31)
str r6, [r12, r7]
5:
ldr r6, [r12, r7]
ands r6, r6, #(1 << 31)
bne 5b
6:
.endm
/* r11 must be MMDC base address */
.macro mmdc_out_and_auto_self_refresh
/* let DDR out of self-refresh */
ldr r7, [r11, #MX6Q_MMDC_MAPSR]
bic r7, r7, #(1 << 21)
str r7, [r11, #MX6Q_MMDC_MAPSR]
5:
7:
ldr r7, [r11, #MX6Q_MMDC_MAPSR]
ands r7, r7, #(1 << 25)
bne 5b
bne 7b
/* enable DDR auto power saving */
ldr r7, [r11, #MX6Q_MMDC_MAPSR]
bic r7, r7, #0x1
str r7, [r11, #MX6Q_MMDC_MAPSR]
/* check if lppdr2 2 channel mode is enabled */
ldr r7, =MX6Q_MMDC_MISC
ldr r6, [r11, r7]
ands r6, r6, #(1 << 2)
beq 9f
ldr r7, [r12, #MX6Q_MMDC_MAPSR]
bic r7, r7, #(1 << 21)
str r7, [r12, #MX6Q_MMDC_MAPSR]
8:
ldr r7, [r12, #MX6Q_MMDC_MAPSR]
ands r7, r7, #(1 << 25)
bne 8b
ldr r7, [r12, #MX6Q_MMDC_MAPSR]
bic r7, r7, #0x1
str r7, [r12, #MX6Q_MMDC_MAPSR]
9:
.endm
/* r10 must be iomuxc base address */
/*
 * Clear the DRAM bypass bits set on the suspend path.  The GPR block
 * sits at IOMUXC base + 0x4000; the register touched here is at GPR
 * offset 0x8.  Clobbers r4 and advances r10 by 0x4000.
 */
.macro resume_iomuxc_gpr
add r10, r10, #0x4000
/* IOMUXC GPR DRAM_RESET_BYPASS */
ldr r4, [r10, #0x8]
bic r4, r4, #(0x1 << 27)
str r4, [r10, #0x8]
/* IOMUXC GPR DRAM_CKE_BYPASS */
ldr r4, [r10, #0x8]
bic r4, r4, #(0x1 << 31)
str r4, [r10, #0x8]
.endm
/*
 * Restore the MMDC IO pads from the table saved in pm_info, then run
 * the MMDC read-FIFO reset and self-refresh exit sequences.
 *
 * r0 = pm_info base; r5 selects addressing: 0 = use virtual addresses,
 * non-zero = use physical addresses.  Clobbers r6-r12.
 */
.macro resume_io
/* restore MMDC IO */
cmp r5, #0x0
ldreq r10, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
ldrne r10, [r0, #PM_INFO_MX6Q_IOMUXC_P_OFFSET]
ldr r6, [r0, #PM_INFO_MMDC_IO_NUM_OFFSET]
ldr r7, =PM_INFO_MMDC_IO_VAL_OFFSET
add r7, r7, r0
10:
/*
 * Each table entry is three words: pad offset, resume value, and a
 * third word (presumably the low-power value used on the suspend
 * path) which is skipped here by the #0x8 post-increment.
 */
ldr r8, [r7], #0x4
ldr r9, [r7], #0x8
str r9, [r10, r8]
subs r6, r6, #0x1
bne 10b
cmp r5, #0x0
/* Here only MMDC0 is set */
ldreq r11, [r0, #PM_INFO_MX6Q_MMDC0_V_OFFSET]
ldrne r11, [r0, #PM_INFO_MX6Q_MMDC0_P_OFFSET]
ldreq r12, [r0, #PM_INFO_MX6Q_MMDC1_V_OFFSET]
ldrne r12, [r0, #PM_INFO_MX6Q_MMDC1_P_OFFSET]
reset_read_fifo
mmdc_out_and_auto_self_refresh
.endm
/*
 * Resume path used when Mega/Fast mix may have been powered off:
 * restore the MMDC IO pads, and — if the previous suspend really did
 * power the M/F mix off — also reprogram the whole MMDC register set
 * and cycle DDR through self-refresh before handing it back.
 *
 * r0 = pm_info base; r5 selects addressing (0 = virtual, else
 * physical).  Clobbers r4, r6-r11 (r10 is advanced by
 * resume_iomuxc_gpr).
 */
.macro resume_mmdc_io
cmp r5, #0x0
ldreq r10, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
ldrne r10, [r0, #PM_INFO_MX6Q_IOMUXC_P_OFFSET]
ldreq r11, [r0, #PM_INFO_MX6Q_MMDC0_V_OFFSET]
ldrne r11, [r0, #PM_INFO_MX6Q_MMDC0_P_OFFSET]
/* resume mmdc iomuxc settings */
ldr r6, [r0, #PM_INFO_MMDC_IO_NUM_OFFSET]
ldr r7, =PM_INFO_MMDC_IO_VAL_OFFSET
add r7, r7, r0
11:
/* 3-word entries: offset, resume value, skipped third word */
ldr r8, [r7], #0x4
ldr r9, [r7], #0x8
str r9, [r10, r8]
subs r6, r6, #0x1
bne 11b
/* check whether we need to restore MMDC */
cmp r5, #0x0
beq 12f
/* check whether last suspend is with M/F mix off */
ldr r9, [r0, #PM_INFO_MX6Q_GPC_P_OFFSET]
ldr r6, [r9, #0x220]
cmp r6, #0x0
bne 13f
12:
/* M/F mix stayed on: just drop the bypass bits and reset the FIFO */
resume_iomuxc_gpr
reset_read_fifo
b 17f
13:
/* restore MMDC settings (saved as offset/value word pairs) */
ldr r6, [r0, #PM_INFO_MMDC_NUM_OFFSET]
ldr r7, =PM_INFO_MMDC_VAL_OFFSET
add r7, r7, r0
14:
ldr r8, [r7], #0x4
ldr r9, [r7], #0x4
str r9, [r11, r8]
subs r6, r6, #0x1
bne 14b
/* let DDR enter self-refresh: set MAPSR bit 20, wait for bit 24 */
ldr r7, [r11, #MX6Q_MMDC_MAPSR]
orr r7, r7, #(1 << 20)
str r7, [r11, #MX6Q_MMDC_MAPSR]
15:
ldr r7, [r11, #MX6Q_MMDC_MAPSR]
ands r7, r7, #(1 << 24)
beq 15b
resume_iomuxc_gpr
reset_read_fifo
/* let DDR out of self-refresh: clear MAPSR bit 20, wait bit 24 clear */
ldr r7, [r11, #MX6Q_MMDC_MAPSR]
bic r7, r7, #(1 << 20)
str r7, [r11, #MX6Q_MMDC_MAPSR]
16:
ldr r7, [r11, #MX6Q_MMDC_MAPSR]
ands r7, r7, #(1 << 24)
bne 16b
/* kick off MMDC by zeroing the register at offset 0x1c */
ldr r4, =0x0
str r4, [r11, #0x1c]
17:
mmdc_out_and_auto_self_refresh
.endm
/*
 * Switch translation to the IRAM page table so DDR can be put into
 * self-refresh safely: save the current TTBR1, kill branch prediction,
 * point TTBR1 at iram_tlb_phys_addr, force its use via TTBCR
 * (PD0=1, N=1), flush the TLB, disable the L1 d-cache, and — on
 * Cortex-A9 only — disable the L2X0 outer cache (control register at
 * offset 0x100).  Undone by restore_ttbr1.  Clobbers r6-r8.
 */
.macro store_ttbr1
/* Store TTBR1 to pm_info->ttbr1 */
mrc p15, 0, r7, c2, c0, 1
str r7, [r0, #PM_INFO_MX6Q_TTBR1_V_OFFSET]
/* Disable Branch Prediction, Z bit in SCTLR. */
mrc p15, 0, r6, c1, c0, 0
bic r6, r6, #0x800
mcr p15, 0, r6, c1, c0, 0
/* Flush the BTAC. */
ldr r6, =0x0
mcr p15, 0, r6, c7, c1, 6
ldr r6, =iram_tlb_phys_addr
ldr r6, [r6]
dsb
isb
/* Store the IRAM table in TTBR1 */
mcr p15, 0, r6, c2, c0, 1
/* Read TTBCR and set PD0=1, N = 1 */
mrc p15, 0, r6, c2, c0, 2
orr r6, r6, #0x11
mcr p15, 0, r6, c2, c0, 2
dsb
isb
/* flush the TLB */
ldr r6, =0x0
mcr p15, 0, r6, c8, c3, 0
/* Disable L1 data cache. */
mrc p15, 0, r6, c1, c0, 0
bic r6, r6, #0x4
mcr p15, 0, r6, c1, c0, 0
dsb
isb
/* Cortex-A7 has no external L2X0, so skip the L2 disable there */
is_cortex_a7
beq 17f
#ifdef CONFIG_CACHE_L2X0
ldr r8, [r0, #PM_INFO_MX6Q_L2_V_OFFSET]
mov r6, #0x0
str r6, [r8, #0x100]
dsb
isb
#endif
17:
.endm
/*
 * Undo store_ttbr1 in reverse order: re-enable the L2X0 (Cortex-A9
 * only) and L1 d-cache, restore TTBCR (PD0=0, N=0), flush the TLB,
 * re-enable branch prediction, flush the BTAC, and put the saved
 * TTBR1 value back.  Clobbers r6-r8.
 */
.macro restore_ttbr1
is_cortex_a7
beq 18f
#ifdef CONFIG_CACHE_L2X0
/* Enable L2. */
ldr r8, [r0, #PM_INFO_MX6Q_L2_V_OFFSET]
ldr r7, =0x1
str r7, [r8, #0x100]
#endif
18:
/* Enable L1 data cache. */
mrc p15, 0, r6, c1, c0, 0
orr r6, r6, #0x4
mcr p15, 0, r6, c1, c0, 0
dsb
isb
/* Restore TTBCR */
/* Read TTBCR and set PD0=0, N = 0 */
mrc p15, 0, r6, c2, c0, 2
bic r6, r6, #0x11
mcr p15, 0, r6, c2, c0, 2
dsb
isb
/* flush the TLB */
ldr r6, =0x0
mcr p15, 0, r6, c8, c3, 0
/* Enable Branch Prediction, Z bit in SCTLR. */
mrc p15, 0, r6, c1, c0, 0
orr r6, r6, #0x800
mcr p15, 0, r6, c1, c0, 0
/* Flush the Branch Target Address Cache (BTAC) */
ldr r6, =0x0
mcr p15, 0, r6, c7, c1, 6
/* Restore TTBR1, get the origin ttbr1 from pm info */
ldr r7, [r0, #PM_INFO_MX6Q_TTBR1_V_OFFSET]
mcr p15, 0, r7, c2, c0, 1
.endm
ENTRY(imx6_suspend)
ldr r1, [r0, #PM_INFO_PBASE_OFFSET]
ldr r2, [r0, #PM_INFO_RESUME_ADDR_OFFSET]
@ -179,10 +478,25 @@ ENTRY(imx6_suspend)
str r9, [r11, #MX6Q_SRC_GPR1]
str r1, [r11, #MX6Q_SRC_GPR2]
/*
* Check if the cpu is Cortex-A7, for Cortex-A7
* the cache implementation is not the same as
* Cortex-A9, so the cache maintenance operation
* is different.
*/
is_cortex_a7
beq a7_dache_flush
/* need to sync L2 cache before DSM. */
sync_l2_cache
b ttbr_store
a7_dache_flush:
disable_l1_cache
ttbr_store:
store_ttbr1
ldr r11, [r0, #PM_INFO_MX6Q_MMDC_V_OFFSET]
ldr r11, [r0, #PM_INFO_MX6Q_MMDC0_V_OFFSET]
ldr r12, [r0, #PM_INFO_MX6Q_MMDC1_V_OFFSET]
/*
* put DDR explicitly into self-refresh and
* disable automatic power savings.
@ -201,31 +515,59 @@ poll_dvfs_set:
ands r7, r7, #(1 << 25)
beq poll_dvfs_set
/* check if lppdr2 2 channel mode is enabled */
ldr r7, =MX6Q_MMDC_MISC
ldr r6, [r11, r7]
ands r6, r6, #(1 << 2)
beq skip_self_refresh_ch1
ldr r7, [r12, #MX6Q_MMDC_MAPSR]
orr r7, r7, #0x1
str r7, [r12, #MX6Q_MMDC_MAPSR]
ldr r7, [r12, #MX6Q_MMDC_MAPSR]
orr r7, r7, #(1 << 21)
str r7, [r12, #MX6Q_MMDC_MAPSR]
poll_dvfs_set_ch1:
ldr r7, [r12, #MX6Q_MMDC_MAPSR]
ands r7, r7, #(1 << 25)
beq poll_dvfs_set_ch1
skip_self_refresh_ch1:
/* use r11 to store the IO address */
ldr r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
ldr r6, =0x0
ldr r7, [r0, #PM_INFO_MMDC_IO_NUM_OFFSET]
ldr r6, [r0, #PM_INFO_MMDC_IO_NUM_OFFSET]
ldr r8, =PM_INFO_MMDC_IO_VAL_OFFSET
add r8, r8, r0
/* LPDDR2's last 3 IOs need special setting */
cmp r3, #IMX_DDR_TYPE_LPDDR2
subeq r7, r7, #0x3
set_mmdc_io_lpm:
ldr r9, [r8], #0x8
str r6, [r11, r9]
subs r7, r7, #0x1
ldr r7, [r8], #0x8
ldr r9, [r8], #0x4
str r9, [r11, r7]
subs r6, r6, #0x1
bne set_mmdc_io_lpm
cmp r3, #IMX_DDR_TYPE_LPDDR2
bne set_mmdc_io_lpm_done
ldr r6, =0x1000
ldr r9, [r8], #0x8
str r6, [r11, r9]
ldr r9, [r8], #0x8
str r6, [r11, r9]
ldr r6, =0x80000
ldr r9, [r8]
str r6, [r11, r9]
set_mmdc_io_lpm_done:
/* check whether it supports Mega/Fast off */
ldr r6, [r0, #PM_INFO_MMDC_NUM_OFFSET]
cmp r6, #0x0
beq set_mmdc_lpm_done
/* IOMUXC GPR DRAM_RESET */
add r11, r11, #0x4000
ldr r6, [r11, #0x8]
orr r6, r6, #(0x1 << 28)
str r6, [r11, #0x8]
/* IOMUXC GPR DRAM_RESET_BYPASS */
ldr r6, [r11, #0x8]
orr r6, r6, #(0x1 << 27)
str r6, [r11, #0x8]
/* IOMUXC GPR DRAM_CKE_BYPASS */
ldr r6, [r11, #0x8]
orr r6, r6, #(0x1 << 31)
str r6, [r11, #0x8]
set_mmdc_lpm_done:
/*
* mask all GPC interrupts before
@ -285,6 +627,27 @@ rbc_loop:
subs r6, r6, #0x1
bne rbc_loop
/*
* ERR005852 Analog: Transition from Deep Sleep Mode to
* LDO Bypass Mode may cause the slow response of the
* VDDARM_CAP output.
*
* Software workaround:
* if internal ldo(VDDARM) bypassed, switch to analog bypass
* mode (0x1E), prio to entering DSM, and then, revert to the
* normal bypass mode, when exiting from DSM.
*/
ldr r11, [r0, #PM_INFO_MX6Q_ANATOP_V_OFFSET]
ldr r10, [r11, #MX6Q_ANATOP_CORE]
and r10, r10, #0x1f
cmp r10, #0x1f
bne ldo_check_done1
ldo_analog_bypass:
ldr r10, [r11, #MX6Q_ANATOP_CORE]
bic r10, r10, #0x1f
orr r10, r10, #0x1e
str r10, [r11, #MX6Q_ANATOP_CORE]
ldo_check_done1:
/* Zzz, enter stop mode */
wfi
nop
@ -297,8 +660,28 @@ rbc_loop:
* wakeup source, system should auto
* resume, we need to restore MMDC IO first
*/
/* restore it with 0x1f if use ldo bypass mode.*/
ldr r10, [r11, #MX6Q_ANATOP_CORE]
and r10, r10, #0x1f
cmp r10, #0x1e
bne ldo_check_done2
ldo_bypass_restore:
ldr r10, [r11, #MX6Q_ANATOP_CORE]
orr r10, r10, #0x1f
str r10, [r11, #MX6Q_ANATOP_CORE]
ldo_check_done2:
mov r5, #0x0
resume_mmdc
/* check whether it supports Mega/Fast off */
ldr r6, [r0, #PM_INFO_MMDC_NUM_OFFSET]
cmp r6, #0x0
beq only_resume_io
resume_mmdc_io
b resume_mmdc_done
only_resume_io:
resume_io
resume_mmdc_done:
restore_ttbr1
/* return to suspend finish */
ret lr
@ -313,6 +696,16 @@ resume:
mcr p15, 0, r6, c1, c0, 0
isb
/* restore it with 0x1f if use ldo bypass mode.*/
ldr r11, [r0, #PM_INFO_MX6Q_ANATOP_P_OFFSET]
ldr r7, [r11, #MX6Q_ANATOP_CORE]
and r7, r7, #0x1f
cmp r7, #0x1e
bne ldo_check_done3
ldr r7, [r11, #MX6Q_ANATOP_CORE]
orr r7, r7, #0x1f
str r7, [r11, #MX6Q_ANATOP_CORE]
ldo_check_done3:
/* get physical resume address from pm_info. */
ldr lr, [r0, #PM_INFO_RESUME_ADDR_OFFSET]
/* clear core0's entry and parameter */
@ -323,7 +716,16 @@ resume:
ldr r3, [r0, #PM_INFO_DDR_TYPE_OFFSET]
mov r5, #0x1
resume_mmdc
/* check whether it supports Mega/Fast off */
ldr r6, [r0, #PM_INFO_MMDC_NUM_OFFSET]
cmp r6, #0x0
beq dsm_only_resume_io
resume_mmdc_io
b dsm_resume_mmdc_done
dsm_only_resume_io:
ldr r3, [r0, #PM_INFO_DDR_TYPE_OFFSET]
resume_io
dsm_resume_mmdc_done:
ret lr
ENDPROC(imx6_suspend)
@ -336,8 +738,11 @@ ENDPROC(imx6_suspend)
/*
 * Post-resume entry: invalidate L1, restart the L2X0 outer cache early
 * (not applicable on Cortex-A7, which is detected and skipped), then
 * jump to the generic cpu_resume path.
 */
ENTRY(v7_cpu_resume)
bl v7_invalidate_l1
is_cortex_a7
beq done
#ifdef CONFIG_CACHE_L2X0
bl l2c310_early_resume
#endif
done:
b cpu_resume
ENDPROC(v7_cpu_resume)