diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig index a266b30d033f..57a2eb8e908a 100644 --- a/arch/arm/mach-imx/Kconfig +++ b/arch/arm/mach-imx/Kconfig @@ -40,6 +40,10 @@ config HAVE_IMX_GPC bool select PM_GENERIC_DOMAINS if PM +config HAVE_IMX_GPCV2 + bool + select PM_GENERIC_DOMAINS if PM + config HAVE_IMX_MMDC bool @@ -571,6 +575,7 @@ config SOC_IMX7D_CA7 select IMX_GPCV2 select HAVE_IMX_DDRC select HAVE_IMX_MU + select HAVE_IMX_GPCV2 select KEYBOARD_SNVS_PWRKEY config SOC_IMX7D_CM4 diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile index 1c2b9421065c..84e8112afb2b 100644 --- a/arch/arm/mach-imx/Makefile +++ b/arch/arm/mach-imx/Makefile @@ -31,6 +31,8 @@ obj-$(CONFIG_SOC_IMX6SX) += cpuidle-imx6sx.o AFLAGS_imx6sx_low_power_idle.o :=-Wa,-march=armv7-a obj-$(CONFIG_SOC_IMX6UL) += cpuidle-imx6sx.o obj-$(CONFIG_SOC_IMX7ULP) += cpuidle-imx7ulp.o +AFLAGS_imx7d_low_power_idle.o :=-Wa,-march=armv7-a +obj-$(CONFIG_SOC_IMX7D_CA7) += cpuidle-imx7d.o imx7d_low_power_idle.o endif ifdef CONFIG_SND_SOC_IMX_PCM_FIQ @@ -71,6 +73,7 @@ obj-$(CONFIG_MACH_IMX35_DT) += imx35-dt.o obj-$(CONFIG_HAVE_IMX_ANATOP) += anatop.o obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o +obj-$(CONFIG_HAVE_IMX_GPCV2) += gpcv2.o obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o obj-$(CONFIG_HAVE_IMX_SRC) += src.o obj-$(CONFIG_HAVE_IMX_DDRC) += ddrc.o @@ -87,7 +90,7 @@ obj-$(CONFIG_SOC_IMX6SLL) += mach-imx6sl.o obj-$(CONFIG_SOC_IMX6SX) += mach-imx6sx.o ddr3_freq_imx6sx.o smp_wfe_imx6.o lpddr2_freq_imx6sx.o obj-$(CONFIG_SOC_IMX6UL) += mach-imx6ul.o obj-$(CONFIG_SOC_IMX7D_CA7) += mach-imx7d.o pm-imx7.o ddr3_freq_imx7d.o smp_wfe.o \ - lpddr3_freq_imx.o + lpddr3_freq_imx.o suspend-imx7.o obj-$(CONFIG_SOC_IMX7D_CM4) += mach-imx7d-cm4.o obj-$(CONFIG_SOC_IMX7ULP) += mach-imx7ulp.o pm-imx7ulp.o @@ -103,6 +106,7 @@ AFLAGS_ddr3_freq_imx6sx.o :=-Wa,-march=armv7-a ifeq ($(CONFIG_SUSPEND),y) AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a +AFLAGS_suspend-imx7.o :=-Wa,-march=armv7-a obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o endif diff --git a/arch/arm/mach-imx/busfreq-imx.c b/arch/arm/mach-imx/busfreq-imx.c index 36fcc56334ff..c4aae89ced16 100644 --- a/arch/arm/mach-imx/busfreq-imx.c +++ b/arch/arm/mach-imx/busfreq-imx.c @@ -804,12 +804,16 @@ static int bus_freq_pm_notify(struct notifier_block *nb, unsigned long event, mutex_lock(&bus_freq_mutex); if (event == PM_SUSPEND_PREPARE) { + if (cpu_is_imx7d() && imx_src_is_m4_enabled()) + imx_mu_lpm_ready(false); high_bus_count++; set_high_bus_freq(1); busfreq_suspended = 1; } else if (event == PM_POST_SUSPEND) { busfreq_suspended = 0; high_bus_count--; + if (cpu_is_imx7d() && imx_src_is_m4_enabled()) + imx_mu_lpm_ready(true); schedule_delayed_work(&bus_freq_daemon, usecs_to_jiffies(5000000)); } @@ -1057,6 +1061,11 @@ static int busfreq_probe(struct platform_device *pdev) (clk_get_rate(m4_clk) > LPAPM_CLK)) high_bus_count++; } + + if (cpu_is_imx7d() && imx_src_is_m4_enabled()) { + high_bus_count++; + imx_mu_lpm_ready(true); + } } if (err) { diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index 54f4c667db81..19a3c0243b77 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h @@ -57,9 +57,19 @@ void imx_gpc_set_arm_power_in_lpm(bool power_off); void imx_gpc_set_l2_mem_power_in_lpm(bool power_off); void imx_gpc_set_arm_power_up_timing(u32 sw2iso, u32 sw); void imx_gpc_set_arm_power_down_timing(u32 sw2iso, u32 sw); +void imx_gpcv2_pre_suspend(bool arm_power_off); +void imx_gpcv2_post_resume(void); +unsigned int 
imx_gpcv2_is_mf_mix_off(void); +void imx_gpcv2_enable_wakeup_for_m4(void); +void imx_gpcv2_disable_wakeup_for_m4(void); void imx25_pm_init(void); void imx27_pm_init(void); void imx5_pmu_init(void); +#ifdef CONFIG_HAVE_IMX_MU +int imx_mu_lpm_ready(bool ready); +#else +static inline int imx_mu_lpm_ready(bool ready) { return 0; } +#endif enum mxc_cpu_pwr_mode { WAIT_CLOCKED, /* wfi only */ @@ -110,6 +120,10 @@ static inline void imx_gpcv2_add_m4_wake_up_irq(u32 hwirq, bool enable) {} #endif void imx_gpc_hold_m4_in_sleep(void); void imx_gpc_release_m4_in_sleep(void); +void __init imx_gpcv2_check_dt(void); +void imx_gpcv2_set_lpm_mode(enum mxc_cpu_pwr_mode mode); +void imx_gpcv2_set_cpu_power_gate_in_idle(bool pdn); +void imx_gpcv2_enable_rbc(bool enable); bool imx_mu_is_m4_in_low_freq(void); bool imx_mu_is_m4_in_stop(void); void imx_mu_set_m4_run_mode(void); @@ -128,6 +142,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode); void imx6_set_int_mem_clk_lpm(bool enable); void imx6sl_set_wait_clk(bool enter); void imx6sx_low_power_idle(void); +void imx7d_low_power_idle(void); #ifdef CONFIG_HAVE_IMX_MMDC int imx_mmdc_get_ddr_type(void); int imx_mmdc_get_lpddr2_2ch_mode(void); @@ -145,14 +160,18 @@ int imx_cpu_kill(unsigned int cpu); #ifdef CONFIG_SUSPEND void v7_cpu_resume(void); +void ca7_cpu_resume(void); void imx53_suspend(void __iomem *ocram_vbase); extern const u32 imx53_suspend_sz; void imx6_suspend(void __iomem *ocram_vbase); +void imx7_suspend(void __iomem *ocram_vbase); #else static inline void v7_cpu_resume(void) {} +static inline void ca7_cpu_resume(void) {} static inline void imx53_suspend(void __iomem *ocram_vbase) {} static const u32 imx53_suspend_sz; static inline void imx6_suspend(void __iomem *ocram_vbase) {} +static inline void imx7_suspend(void __iomem *ocram_vbase) {} #endif #ifdef CONFIG_HAVE_IMX_DDRC @@ -167,6 +186,7 @@ void imx6dl_pm_init(void); void imx6sl_pm_init(void); void imx6sx_pm_init(void); void imx6ul_pm_init(void); +void imx7d_pm_init(void); void imx7ulp_pm_init(void); #ifdef CONFIG_PM diff --git a/arch/arm/mach-imx/cpuidle-imx7d.c b/arch/arm/mach-imx/cpuidle-imx7d.c new file mode 100644 index 000000000000..8e1e61c5503b --- /dev/null +++ b/arch/arm/mach-imx/cpuidle-imx7d.c @@ -0,0 +1,390 @@ +/* + * Copyright (C) 2015 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "common.h" +#include "cpuidle.h" +#include "hardware.h" + +#define XTALOSC24M_OSC_CONFIG0 0x10 +#define XTALOSC24M_OSC_CONFIG1 0x20 +#define XTALOSC24M_OSC_CONFIG2 0x30 +#define XTALOSC24M_OSC_CONFIG0_RC_OSC_PROG_CUR_SHIFT 24 +#define XTALOSC24M_OSC_CONFIG0_HYST_MINUS_MASK 0xf +#define XTALOSC24M_OSC_CONFIG0_HYST_MINUS_SHIFT 16 +#define XTALOSC24M_OSC_CONFIG0_HYST_PLUS_MASK 0xf +#define XTALOSC24M_OSC_CONFIG0_HYST_PLUS_SHIFT 12 +#define XTALOSC24M_OSC_CONFIG0_RC_OSC_PROG_SHIFT 4 +#define XTALOSC24M_OSC_CONFIG0_ENABLE_SHIFT 1 +#define XTALOSC24M_OSC_CONFIG0_START_SHIFT 0 +#define XTALOSC24M_OSC_CONFIG1_COUNT_RC_CUR_SHIFT 20 +#define XTALOSC24M_OSC_CONFIG1_COUNT_RC_TRG_SHIFT 0 +#define XTALOSC24M_OSC_CONFIG2_COUNT_1M_TRG_MASK 0xfff +#define XTALOSC24M_OSC_CONFIG2_COUNT_1M_TRG_SHIFT 0 + +#define XTALOSC_CTRL_24M 0x0 +#define XTALOSC_CTRL_24M_RC_OSC_EN_SHIFT 13 +#define REG_SET 0x4 + +static void __iomem *wfi_iram_base; +static void __iomem *wfi_iram_base_phys; +extern unsigned long iram_tlb_phys_addr; + +struct imx7_pm_base { + phys_addr_t pbase; + void __iomem *vbase; +}; + +struct imx7_cpuidle_pm_info { + phys_addr_t vbase; /* The virtual address of pm_info. */ + phys_addr_t pbase; /* The physical address of pm_info. */ + phys_addr_t resume_addr; /* The physical resume address for asm code */ + u32 pm_info_size; + u32 ttbr; + u32 num_online_cpus; + u32 num_lpi_cpus; + atomic_t val; + atomic_t flag0; + atomic_t flag1; + struct imx7_pm_base ddrc_base; + struct imx7_pm_base ccm_base; + struct imx7_pm_base anatop_base; + struct imx7_pm_base src_base; + struct imx7_pm_base iomuxc_gpr_base; + struct imx7_pm_base gpc_base; + struct imx7_pm_base gic_dist_base; +} __aligned(8); + +static atomic_t master_lpi = ATOMIC_INIT(0); +static atomic_t master_wait = ATOMIC_INIT(0); + +static void (*imx7d_wfi_in_iram_fn)(void __iomem *iram_vbase); +static struct imx7_cpuidle_pm_info *cpuidle_pm_info; + +#define MX7D_POWERDWN_IDLE_PARAM \ + ((1 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \ + (1 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \ + (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT)) + +#define MX7D_STANDBY_IDLE_PARAM \ + ((1 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \ + (1 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \ + (PSCI_POWER_STATE_TYPE_STANDBY << PSCI_0_2_POWER_STATE_TYPE_SHIFT)) + +/* Mapped for the kernel, unlike cpuidle_pm_info->gic_dist_base.vbase */ +static void __iomem *imx7d_cpuidle_gic_base; + +static void imx_pen_lock(int cpu) +{ + if (cpu == 0) { + atomic_set(&cpuidle_pm_info->flag0, 1); + dsb(); + atomic_set(&cpuidle_pm_info->val, cpu); + do { + dsb(); + } while (atomic_read(&cpuidle_pm_info->flag1) == 1 + && atomic_read(&cpuidle_pm_info->val) == cpu) + ; + } else { + atomic_set(&cpuidle_pm_info->flag1, 1); + dsb(); + atomic_set(&cpuidle_pm_info->val, cpu); + do { + dsb(); + } while (atomic_read(&cpuidle_pm_info->flag0) == 1 + && atomic_read(&cpuidle_pm_info->val) == cpu) + ; + } +} + +static void imx_pen_unlock(int cpu) +{ + dsb(); + if (cpu == 0) + atomic_set(&cpuidle_pm_info->flag0, 0); + else + atomic_set(&cpuidle_pm_info->flag1, 0); +} + +static int imx7d_idle_finish(unsigned long val) +{ + if (psci_ops.cpu_suspend) + psci_ops.cpu_suspend(MX7D_POWERDWN_IDLE_PARAM, __pa(cpu_resume)); + else + imx7d_wfi_in_iram_fn(wfi_iram_base); + + return 0; +} + +static bool 
imx7d_gic_sgis_pending(void) +{ + void __iomem *sgip_base = imx7d_cpuidle_gic_base + 0x1f20; + + return (readl_relaxed(sgip_base + 0x0) | + readl_relaxed(sgip_base + 0x4) | + readl_relaxed(sgip_base + 0x8) | + readl_relaxed(sgip_base + 0xc)); +} + +static DEFINE_SPINLOCK(psci_lock); +static int imx7d_enter_low_power_idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + int mode = get_bus_freq_mode(); + + + if ((index == 1) || ((mode != BUS_FREQ_LOW) && index == 2)) { + index = 1; + if (atomic_inc_return(&master_wait) == num_online_cpus()) + imx_gpcv2_set_lpm_mode(WAIT_UNCLOCKED); + + cpu_do_idle(); + + atomic_dec(&master_wait); + imx_gpcv2_set_lpm_mode(WAIT_CLOCKED); + } else { + if (psci_ops.cpu_suspend) { + cpu_pm_enter(); + spin_lock(&psci_lock); + if (atomic_inc_return(&master_lpi) == num_online_cpus()) { + if (imx7d_gic_sgis_pending()) { + atomic_dec(&master_lpi); + index = -1; + goto psci_skip_lpi_flow; + } + + imx_gpcv2_set_lpm_mode(WAIT_UNCLOCKED); + imx_gpcv2_set_cpu_power_gate_in_idle(true); + + cpu_cluster_pm_enter(); + } + spin_unlock(&psci_lock); + + cpu_suspend(0, imx7d_idle_finish); + + spin_lock(&psci_lock); + if (atomic_read(&master_lpi) == num_online_cpus()) { + cpu_cluster_pm_exit(); + imx_gpcv2_set_cpu_power_gate_in_idle(false); + imx_gpcv2_set_lpm_mode(WAIT_CLOCKED); + } + + atomic_dec(&master_lpi); +psci_skip_lpi_flow: + spin_unlock(&psci_lock); + cpu_pm_exit(); + } else { + imx_pen_lock(dev->cpu); + cpuidle_pm_info->num_online_cpus = num_online_cpus(); + ++cpuidle_pm_info->num_lpi_cpus; + cpu_pm_enter(); + if (cpuidle_pm_info->num_lpi_cpus == + cpuidle_pm_info->num_online_cpus) { + /* + * GPC will not wake on SGIs so check for them + * manually here. At this point we know the other cpu + * is in wfi or waiting for the lock and can't send + * any additional IPIs. 
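+				 * If an SGI is already latched pending in the GIC
+				 * distributor at this point, the low-power entry
+				 * below is aborted (index = -1) so the IPI is
+				 * handled instead of being lost.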
+ */ + if (imx7d_gic_sgis_pending()) { + index = -1; + goto skip_lpi_flow; + } + imx_gpcv2_set_lpm_mode(WAIT_UNCLOCKED); + imx_gpcv2_set_cpu_power_gate_in_idle(true); + cpu_cluster_pm_enter(); + } else { + imx_set_cpu_jump(dev->cpu, ca7_cpu_resume); + } + + cpu_suspend(0, imx7d_idle_finish); + + if (cpuidle_pm_info->num_lpi_cpus == + cpuidle_pm_info->num_online_cpus) { + cpu_cluster_pm_exit(); + imx_gpcv2_set_cpu_power_gate_in_idle(false); + imx_gpcv2_set_lpm_mode(WAIT_CLOCKED); + } + +skip_lpi_flow: + cpu_pm_exit(); + --cpuidle_pm_info->num_lpi_cpus; + imx_pen_unlock(dev->cpu); + } + } + + return index; +} + +static struct cpuidle_driver imx7d_cpuidle_driver = { + .name = "imx7d_cpuidle", + .owner = THIS_MODULE, + .states = { + /* WFI */ + ARM_CPUIDLE_WFI_STATE, + /* WAIT MODE */ + { + .exit_latency = 50, + .target_residency = 75, + .flags = CPUIDLE_FLAG_TIMER_STOP, + .enter = imx7d_enter_low_power_idle, + .name = "WAIT", + .desc = "Clock off", + }, + /* LOW POWER IDLE */ + { + .exit_latency = 10000, + .target_residency = 20000, + .flags = CPUIDLE_FLAG_TIMER_STOP, + .enter = imx7d_enter_low_power_idle, + .name = "LOW-POWER-IDLE", + .desc = "ARM power off", + }, + }, + .state_count = 3, + .safe_state_index = 0, +}; + +int imx7d_enable_rcosc(void) +{ + void __iomem *anatop_base = + (void __iomem *)IMX_IO_P2V(MX7D_ANATOP_BASE_ADDR); + u32 val; + + imx_gpcv2_set_lpm_mode(WAIT_CLOCKED); + /* set RC-OSC freq and turn it on */ + writel_relaxed(0x1 << XTALOSC_CTRL_24M_RC_OSC_EN_SHIFT, + anatop_base + XTALOSC_CTRL_24M + REG_SET); + /* + * config RC-OSC freq + * tune_enable = 1;tune_start = 1;hyst_plus = 0;hyst_minus = 0; + * osc_prog = 0xa7; + */ + writel_relaxed( + 0x4 << XTALOSC24M_OSC_CONFIG0_RC_OSC_PROG_CUR_SHIFT | + 0xa7 << XTALOSC24M_OSC_CONFIG0_RC_OSC_PROG_SHIFT | + 0x1 << XTALOSC24M_OSC_CONFIG0_ENABLE_SHIFT | + 0x1 << XTALOSC24M_OSC_CONFIG0_START_SHIFT, + anatop_base + XTALOSC24M_OSC_CONFIG0); + /* set count_trg = 0x2dc */ + writel_relaxed( + 0x40 << XTALOSC24M_OSC_CONFIG1_COUNT_RC_CUR_SHIFT | + 0x2dc << XTALOSC24M_OSC_CONFIG1_COUNT_RC_TRG_SHIFT, + anatop_base + XTALOSC24M_OSC_CONFIG1); + /* wait at least 4ms according to hardware design */ + mdelay(6); + /* + * now add some hysteresis, hyst_plus=3, hyst_minus=3 + * (the minimum hysteresis that looks good is 2) + */ + val = readl_relaxed(anatop_base + XTALOSC24M_OSC_CONFIG0); + val &= ~((XTALOSC24M_OSC_CONFIG0_HYST_MINUS_MASK << + XTALOSC24M_OSC_CONFIG0_HYST_MINUS_SHIFT) | + (XTALOSC24M_OSC_CONFIG0_HYST_PLUS_MASK << + XTALOSC24M_OSC_CONFIG0_HYST_PLUS_SHIFT)); + val |= (0x3 << XTALOSC24M_OSC_CONFIG0_HYST_MINUS_SHIFT) | + (0x3 << XTALOSC24M_OSC_CONFIG0_HYST_PLUS_SHIFT); + writel_relaxed(val, anatop_base + XTALOSC24M_OSC_CONFIG0); + /* set the count_1m_trg = 0x2d7 */ + val = readl_relaxed(anatop_base + XTALOSC24M_OSC_CONFIG2); + val &= ~(XTALOSC24M_OSC_CONFIG2_COUNT_1M_TRG_MASK << + XTALOSC24M_OSC_CONFIG2_COUNT_1M_TRG_SHIFT); + val |= 0x2d7 << XTALOSC24M_OSC_CONFIG2_COUNT_1M_TRG_SHIFT; + writel_relaxed(val, anatop_base + XTALOSC24M_OSC_CONFIG2); + /* + * hardware design require to write XTALOSC24M_OSC_CONFIG0 or + * XTALOSC24M_OSC_CONFIG1 to + * make XTALOSC24M_OSC_CONFIG2 write work + */ + val = readl_relaxed(anatop_base + XTALOSC24M_OSC_CONFIG1); + writel_relaxed(val, anatop_base + XTALOSC24M_OSC_CONFIG1); + + return 0; +} + +int __init imx7d_cpuidle_init(void) +{ + wfi_iram_base_phys = (void *)(iram_tlb_phys_addr + + MX7_CPUIDLE_OCRAM_ADDR_OFFSET); + + /* Make sure wfi_iram_base is 8 byte aligned. 
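+	 * fncpy() used further down requires both source and destination
+	 * to be aligned to FNCPY_ALIGN (8 bytes), hence the round-up below.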
*/ + if ((uintptr_t)(wfi_iram_base_phys) & (FNCPY_ALIGN - 1)) + wfi_iram_base_phys += FNCPY_ALIGN - + ((uintptr_t)wfi_iram_base_phys % (FNCPY_ALIGN)); + + wfi_iram_base = (void *)IMX_IO_P2V((unsigned long) wfi_iram_base_phys); + + cpuidle_pm_info = wfi_iram_base; + cpuidle_pm_info->vbase = (phys_addr_t) wfi_iram_base; + cpuidle_pm_info->pbase = (phys_addr_t) wfi_iram_base_phys; + cpuidle_pm_info->pm_info_size = sizeof(*cpuidle_pm_info); + cpuidle_pm_info->resume_addr = virt_to_phys(ca7_cpu_resume); + cpuidle_pm_info->num_online_cpus = num_online_cpus(); + + cpuidle_pm_info->ddrc_base.pbase = MX7D_DDRC_BASE_ADDR; + cpuidle_pm_info->ddrc_base.vbase = + (void __iomem *)IMX_IO_P2V(MX7D_DDRC_BASE_ADDR); + + cpuidle_pm_info->ccm_base.pbase = MX7D_CCM_BASE_ADDR; + cpuidle_pm_info->ccm_base.vbase = + (void __iomem *)IMX_IO_P2V(MX7D_CCM_BASE_ADDR); + + cpuidle_pm_info->anatop_base.pbase = MX7D_ANATOP_BASE_ADDR; + cpuidle_pm_info->anatop_base.vbase = + (void __iomem *)IMX_IO_P2V(MX7D_ANATOP_BASE_ADDR); + + cpuidle_pm_info->src_base.pbase = MX7D_SRC_BASE_ADDR; + cpuidle_pm_info->src_base.vbase = + (void __iomem *)IMX_IO_P2V(MX7D_SRC_BASE_ADDR); + + cpuidle_pm_info->iomuxc_gpr_base.pbase = MX7D_IOMUXC_GPR_BASE_ADDR; + cpuidle_pm_info->iomuxc_gpr_base.vbase = + (void __iomem *)IMX_IO_P2V(MX7D_IOMUXC_GPR_BASE_ADDR); + + cpuidle_pm_info->gpc_base.pbase = MX7D_GPC_BASE_ADDR; + cpuidle_pm_info->gpc_base.vbase = + (void __iomem *)IMX_IO_P2V(MX7D_GPC_BASE_ADDR); + + cpuidle_pm_info->gic_dist_base.pbase = MX7D_GIC_BASE_ADDR; + cpuidle_pm_info->gic_dist_base.vbase = + (void __iomem *)IMX_IO_P2V(MX7D_GIC_BASE_ADDR); + + imx7d_cpuidle_gic_base = ioremap(MX7D_GIC_BASE_ADDR, MX7D_GIC_SIZE); + + imx7d_enable_rcosc(); + + /* code size should include cpuidle_pm_info size */ + if (!psci_ops.cpu_suspend) { + imx7d_wfi_in_iram_fn = (void *)fncpy(wfi_iram_base + + sizeof(*cpuidle_pm_info), + &imx7d_low_power_idle, + MX7_CPUIDLE_OCRAM_SIZE - sizeof(*cpuidle_pm_info)); + } + + return cpuidle_register(&imx7d_cpuidle_driver, NULL); +} diff --git a/arch/arm/mach-imx/cpuidle.h b/arch/arm/mach-imx/cpuidle.h index ce552c096cae..1d3c93f3f6b9 100644 --- a/arch/arm/mach-imx/cpuidle.h +++ b/arch/arm/mach-imx/cpuidle.h @@ -9,6 +9,8 @@ extern int imx5_cpuidle_init(void); extern int imx6q_cpuidle_init(void); extern int imx6sl_cpuidle_init(void); extern int imx6sx_cpuidle_init(void); +extern int imx7d_cpuidle_init(void); +extern int imx7d_enable_rcosc(void); extern int imx7ulp_cpuidle_init(void); #else static inline int imx5_cpuidle_init(void) @@ -27,6 +29,14 @@ static inline int imx6sx_cpuidle_init(void) { return 0; } +static inline int imx7d_cpuidle_init(void) +{ + return 0; +} +static inline int imx7d_enable_rcosc(void) +{ + return 0; +} static inline int imx7ulp_cpuidle_init(void) { return 0; diff --git a/arch/arm/mach-imx/gpcv2.c b/arch/arm/mach-imx/gpcv2.c new file mode 100644 index 000000000000..3e8ab91cb97f --- /dev/null +++ b/arch/arm/mach-imx/gpcv2.c @@ -0,0 +1,851 @@ +/* + * Copyright (C) 2015-2016 Freescale Semiconductor, Inc. + * + * The code contained herein is licensed under the GNU General Public + * License. 
You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" +#include "hardware.h" + +#define IMR_NUM 4 +#define GPC_MAX_IRQS (IMR_NUM * 32) +#define GPC_LPCR_A7_BSC 0x0 +#define GPC_LPCR_A7_AD 0x4 +#define GPC_LPCR_M4 0x8 +#define GPC_SLPCR 0x14 +#define GPC_MLPCR 0x20 +#define GPC_PGC_ACK_SEL_A7 0x24 +#define GPC_MISC 0x2c +#define GPC_IMR1_CORE0 0x30 +#define GPC_IMR1_CORE1 0x40 +#define GPC_IMR1_M4 0x50 +#define GPC_SLOT0_CFG 0xb0 +#define GPC_PGC_CPU_MAPPING 0xec +#define GPC_CPU_PGC_SW_PUP_REQ 0xf0 +#define GPC_PU_PGC_SW_PUP_REQ 0xf8 +#define GPC_CPU_PGC_SW_PDN_REQ 0xfc +#define GPC_PU_PGC_SW_PDN_REQ 0x104 +#define GPC_GTOR 0x124 +#define GPC_PGC_C0 0x800 +#define GPC_PGC_C0_PUPSCR 0x804 +#define GPC_PGC_SCU_TIMING 0x890 +#define GPC_PGC_C1 0x840 +#define GPC_PGC_C1_PUPSCR 0x844 +#define GPC_PGC_SCU 0x880 +#define GPC_PGC_FM 0xa00 + +#define BM_LPCR_A7_BSC_IRQ_SRC_A7_WAKEUP 0x70000000 +#define BM_LPCR_A7_BSC_CPU_CLK_ON_LPM 0x4000 +#define BM_LPCR_A7_BSC_LPM1 0xc +#define BM_LPCR_A7_BSC_LPM0 0x3 +#define BP_LPCR_A7_BSC_LPM1 2 +#define BP_LPCR_A7_BSC_LPM0 0 +#define BM_LPCR_M4_MASK_DSM_TRIGGER 0x80000000 +#define BM_SLPCR_EN_DSM 0x80000000 +#define BM_SLPCR_RBC_EN 0x40000000 +#define BM_SLPCR_REG_BYPASS_COUNT 0x3f000000 +#define BM_SLPCR_VSTBY 0x4 +#define BM_SLPCR_SBYOS 0x2 +#define BM_SLPCR_BYPASS_PMIC_READY 0x1 +#define BM_SLPCR_EN_A7_FASTWUP_WAIT_MODE 0x10000 +#define BM_LPCR_A7_AD_L2PGE 0x10000 +#define BM_LPCR_A7_AD_EN_C1_PUP 0x800 +#define BM_LPCR_A7_AD_EN_C1_IRQ_PUP 0x400 +#define BM_LPCR_A7_AD_EN_C0_PUP 0x200 +#define BM_LPCR_A7_AD_EN_C0_IRQ_PUP 0x100 +#define BM_LPCR_A7_AD_EN_PLAT_PDN 0x10 +#define BM_LPCR_A7_AD_EN_C1_PDN 0x8 +#define BM_LPCR_A7_AD_EN_C1_WFI_PDN 0x4 +#define BM_LPCR_A7_AD_EN_C0_PDN 0x2 +#define BM_LPCR_A7_AD_EN_C0_WFI_PDN 0x1 + +#define BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7 0x2 +#define BM_GPC_PGC_PCG 0x1 +#define BM_GPC_PGC_CORE_PUPSCR 0x7fff80 + +#define BM_GPC_PGC_ACK_SEL_A7_DUMMY_PUP_ACK 0x80000000 +#define BM_GPC_PGC_ACK_SEL_A7_DUMMY_PDN_ACK 0x8000 +#define BM_GPC_MLPCR_MEMLP_CTL_DIS 0x1 + +#define BP_LPCR_A7_BSC_IRQ_SRC 28 + +#define MAX_SLOT_NUMBER 10 +#define A7_LPM_WAIT 0x5 +#define A7_LPM_STOP 0xa + +enum imx_gpc_slot { + CORE0_A7, + CORE1_A7, + SCU_A7, + FAST_MEGA_MIX, + MIPI_PHY, + PCIE_PHY, + USB_OTG1_PHY, + USB_OTG2_PHY, + USB_HSIC_PHY, + CORE0_M4, +}; + +static void __iomem *gpc_base; +static u32 gpcv2_wake_irqs[IMR_NUM]; +static u32 gpcv2_saved_imrs[IMR_NUM]; +static u32 gpcv2_saved_imrs_m4[IMR_NUM]; +static u32 gpcv2_mf_irqs[IMR_NUM]; +static u32 gpcv2_mf_request_on[IMR_NUM]; +static DEFINE_SPINLOCK(gpcv2_lock); + +void imx_gpcv2_add_m4_wake_up_irq(u32 hwirq, bool enable) +{ + unsigned int idx = hwirq / 32; + unsigned long flags; + u32 mask; + + /* Sanity check for SPI irq */ + if (hwirq < 32) + return; + + mask = 1 << hwirq % 32; + spin_lock_irqsave(&gpcv2_lock, flags); + gpcv2_wake_irqs[idx] = enable ? 
gpcv2_wake_irqs[idx] | mask : + gpcv2_wake_irqs[idx] & ~mask; + spin_unlock_irqrestore(&gpcv2_lock, flags); +} + +static int imx_gpcv2_irq_set_wake(struct irq_data *d, unsigned int on) +{ + unsigned int idx = d->hwirq / 32; + unsigned long flags; + u32 mask; + + BUG_ON(idx >= IMR_NUM); + + mask = 1 << d->hwirq % 32; + spin_lock_irqsave(&gpcv2_lock, flags); + gpcv2_wake_irqs[idx] = on ? gpcv2_wake_irqs[idx] | mask : + gpcv2_wake_irqs[idx] & ~mask; + spin_unlock_irqrestore(&gpcv2_lock, flags); + + return 0; +} + +void imx_gpcv2_mask_all(void) +{ + void __iomem *reg_imr1 = gpc_base + GPC_IMR1_CORE0; + int i; + + for (i = 0; i < IMR_NUM; i++) { + gpcv2_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4); + writel_relaxed(~0, reg_imr1 + i * 4); + } +} + +void imx_gpcv2_restore_all(void) +{ + void __iomem *reg_imr1 = gpc_base + GPC_IMR1_CORE0; + int i; + + for (i = 0; i < IMR_NUM; i++) + writel_relaxed(gpcv2_saved_imrs[i], reg_imr1 + i * 4); +} + +void imx_gpcv2_hwirq_unmask(unsigned int hwirq) +{ + void __iomem *reg; + u32 val; + + reg = gpc_base + GPC_IMR1_CORE0 + (hwirq / 32) * 4; + val = readl_relaxed(reg); + val &= ~(1 << hwirq % 32); + writel_relaxed(val, reg); +} + +void imx_gpcv2_hwirq_mask(unsigned int hwirq) +{ + void __iomem *reg; + u32 val; + + reg = gpc_base + GPC_IMR1_CORE0 + (hwirq / 32) * 4; + val = readl_relaxed(reg); + val |= 1 << (hwirq % 32); + writel_relaxed(val, reg); +} + +static void imx_gpcv2_irq_unmask(struct irq_data *d) +{ + imx_gpcv2_hwirq_unmask(d->hwirq); + irq_chip_unmask_parent(d); +} + +static void imx_gpcv2_irq_mask(struct irq_data *d) +{ + imx_gpcv2_hwirq_mask(d->hwirq); + irq_chip_mask_parent(d); +} + +void imx_gpcv2_set_slot_ack(u32 index, enum imx_gpc_slot m_core, + bool mode, bool ack) +{ + u32 val; + + if (index >= MAX_SLOT_NUMBER) + pr_err("Invalid slot index!\n"); + /* set slot */ + writel_relaxed(readl_relaxed(gpc_base + GPC_SLOT0_CFG + index * 4) | + ((mode + 1) << (m_core * 2)), + gpc_base + GPC_SLOT0_CFG + index * 4); + + if (ack) { + /* set ack */ + val = readl_relaxed(gpc_base + GPC_PGC_ACK_SEL_A7); + /* clear dummy ack */ + val &= ~(1 << (15 + (mode ? 16 : 0))); + val |= 1 << (m_core + (mode ? 16 : 0)); + writel_relaxed(val, gpc_base + GPC_PGC_ACK_SEL_A7); + } +} + +void imx_gpcv2_set_lpm_mode(enum mxc_cpu_pwr_mode mode) +{ + unsigned long flags; + u32 val1, val2; + + spin_lock_irqsave(&gpcv2_lock, flags); + + val1 = readl_relaxed(gpc_base + GPC_LPCR_A7_BSC); + val2 = readl_relaxed(gpc_base + GPC_SLPCR); + + /* all cores' LPM settings must be same */ + val1 &= ~(BM_LPCR_A7_BSC_LPM0 | BM_LPCR_A7_BSC_LPM1); + + val1 |= BM_LPCR_A7_BSC_CPU_CLK_ON_LPM; + + val2 &= ~(BM_SLPCR_EN_DSM | BM_SLPCR_VSTBY | BM_SLPCR_RBC_EN | + BM_SLPCR_SBYOS | BM_SLPCR_BYPASS_PMIC_READY); + /* + * GPC: When improper low-power sequence is used, + * the SoC enters low power mode before the ARM core executes WFI. + * + * Software workaround: + * 1) Software should trigger IRQ #32 (IOMUX) to be always pending + * by setting IOMUX_GPR1_IRQ. + * 2) Software should then unmask IRQ #32 in GPC before setting GPC + * Low-Power mode. + * 3) Software should mask IRQ #32 right after GPC Low-Power mode + * is set. 
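+	 * In this GPC domain hwirq 0 corresponds to GIC SPI 32 (the IOMUX
+	 * interrupt), so WAIT_CLOCKED below unmasks it again, while the
+	 * real low-power modes mask it once the LPM field is programmed.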
+ */ + switch (mode) { + case WAIT_CLOCKED: + imx_gpcv2_hwirq_unmask(0); + break; + case WAIT_UNCLOCKED: + val1 |= A7_LPM_WAIT << BP_LPCR_A7_BSC_LPM0; + val1 &= ~BM_LPCR_A7_BSC_CPU_CLK_ON_LPM; + imx_gpcv2_hwirq_mask(0); + break; + case STOP_POWER_ON: + val1 |= A7_LPM_STOP << BP_LPCR_A7_BSC_LPM0; + val1 &= ~BM_LPCR_A7_BSC_CPU_CLK_ON_LPM; + val2 |= BM_SLPCR_EN_DSM; + val2 |= BM_SLPCR_RBC_EN; + val2 |= BM_SLPCR_BYPASS_PMIC_READY; + imx_gpcv2_hwirq_mask(0); + break; + case STOP_POWER_OFF: + val1 |= A7_LPM_STOP << BP_LPCR_A7_BSC_LPM0; + val1 &= ~BM_LPCR_A7_BSC_CPU_CLK_ON_LPM; + val2 |= BM_SLPCR_EN_DSM; + val2 |= BM_SLPCR_RBC_EN; + val2 |= BM_SLPCR_SBYOS; + val2 |= BM_SLPCR_VSTBY; + val2 |= BM_SLPCR_BYPASS_PMIC_READY; + imx_gpcv2_hwirq_mask(0); + break; + default: + return; + } + writel_relaxed(val1, gpc_base + GPC_LPCR_A7_BSC); + writel_relaxed(val2, gpc_base + GPC_SLPCR); + + spin_unlock_irqrestore(&gpcv2_lock, flags); +} + +void imx_gpcv2_set_plat_power_gate_by_lpm(bool pdn) +{ + u32 val = readl_relaxed(gpc_base + GPC_LPCR_A7_AD); + + val &= ~(BM_LPCR_A7_AD_EN_PLAT_PDN | BM_LPCR_A7_AD_L2PGE); + if (pdn) + val |= BM_LPCR_A7_AD_EN_PLAT_PDN | BM_LPCR_A7_AD_L2PGE; + + writel_relaxed(val, gpc_base + GPC_LPCR_A7_AD); +} + +void imx_gpcv2_set_m_core_pgc(bool enable, u32 offset) +{ + u32 val = readl_relaxed(gpc_base + offset) & (~BM_GPC_PGC_PCG); + + if (enable) + val |= BM_GPC_PGC_PCG; + + writel_relaxed(val, gpc_base + offset); +} + +void imx_gpcv2_set_core1_pdn_pup_by_software(bool pdn) +{ + u32 val = readl_relaxed(gpc_base + (pdn ? + GPC_CPU_PGC_SW_PDN_REQ : GPC_CPU_PGC_SW_PUP_REQ)); + + imx_gpcv2_set_m_core_pgc(true, GPC_PGC_C1); + val |= BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7; + writel_relaxed(val, gpc_base + (pdn ? + GPC_CPU_PGC_SW_PDN_REQ : GPC_CPU_PGC_SW_PUP_REQ)); + + while ((readl_relaxed(gpc_base + (pdn ? 
+ GPC_CPU_PGC_SW_PDN_REQ : GPC_CPU_PGC_SW_PUP_REQ)) & + BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7) != 0) + ; + imx_gpcv2_set_m_core_pgc(false, GPC_PGC_C1); +} + +void imx_gpcv2_set_cpu_power_gate_by_wfi(u32 cpu, bool pdn) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(&gpcv2_lock, flags); + val = readl_relaxed(gpc_base + GPC_LPCR_A7_AD); + + if (cpu == 0) { + if (pdn) { + imx_gpcv2_set_m_core_pgc(true, GPC_PGC_C0); + val |= BM_LPCR_A7_AD_EN_C0_WFI_PDN | + BM_LPCR_A7_AD_EN_C0_IRQ_PUP; + } else { + imx_gpcv2_set_m_core_pgc(false, GPC_PGC_C0); + val &= ~(BM_LPCR_A7_AD_EN_C0_WFI_PDN | + BM_LPCR_A7_AD_EN_C0_IRQ_PUP); + } + } + if (cpu == 1) { + if (pdn) { + imx_gpcv2_set_m_core_pgc(true, GPC_PGC_C1); + val |= BM_LPCR_A7_AD_EN_C1_WFI_PDN | + BM_LPCR_A7_AD_EN_C1_IRQ_PUP; + } else { + imx_gpcv2_set_m_core_pgc(false, GPC_PGC_C1); + val &= ~(BM_LPCR_A7_AD_EN_C1_WFI_PDN | + BM_LPCR_A7_AD_EN_C1_IRQ_PUP); + } + } + writel_relaxed(val, gpc_base + GPC_LPCR_A7_AD); + spin_unlock_irqrestore(&gpcv2_lock, flags); +} + +void imx_gpcv2_set_cpu_power_gate_by_lpm(u32 cpu, bool pdn) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(&gpcv2_lock, flags); + + val = readl_relaxed(gpc_base + GPC_LPCR_A7_AD); + if (cpu == 0) { + if (pdn) + val |= BM_LPCR_A7_AD_EN_C0_PDN | + BM_LPCR_A7_AD_EN_C0_PUP; + else + val &= ~(BM_LPCR_A7_AD_EN_C0_PDN | + BM_LPCR_A7_AD_EN_C0_PUP); + } + if (cpu == 1) { + if (pdn) + val |= BM_LPCR_A7_AD_EN_C1_PDN | + BM_LPCR_A7_AD_EN_C1_PUP; + else + val &= ~(BM_LPCR_A7_AD_EN_C1_PDN | + BM_LPCR_A7_AD_EN_C1_PUP); + } + + writel_relaxed(val, gpc_base + GPC_LPCR_A7_AD); + spin_unlock_irqrestore(&gpcv2_lock, flags); +} + +void imx_gpcv2_set_cpu_power_gate_in_idle(bool pdn) +{ + unsigned long flags; + u32 cpu; + + for_each_possible_cpu(cpu) + imx_gpcv2_set_cpu_power_gate_by_lpm(cpu, pdn); + + spin_lock_irqsave(&gpcv2_lock, flags); + + imx_gpcv2_set_m_core_pgc(pdn, GPC_PGC_C0); + if (num_online_cpus() > 1) + imx_gpcv2_set_m_core_pgc(pdn, GPC_PGC_C1); + imx_gpcv2_set_m_core_pgc(pdn, GPC_PGC_SCU); + imx_gpcv2_set_plat_power_gate_by_lpm(pdn); + + if (pdn) { + imx_gpcv2_set_slot_ack(0, CORE0_A7, false, false); + if (num_online_cpus() > 1) + imx_gpcv2_set_slot_ack(2, CORE1_A7, false, false); + imx_gpcv2_set_slot_ack(3, SCU_A7, false, true); + imx_gpcv2_set_slot_ack(6, SCU_A7, true, false); + if (num_online_cpus() > 1) + imx_gpcv2_set_slot_ack(6, CORE1_A7, true, false); + imx_gpcv2_set_slot_ack(6, CORE0_A7, true, true); + } else { + writel_relaxed(0x0, gpc_base + GPC_SLOT0_CFG + 0 * 0x4); + writel_relaxed(0x0, gpc_base + GPC_SLOT0_CFG + 2 * 0x4); + writel_relaxed(0x0, gpc_base + GPC_SLOT0_CFG + 3 * 0x4); + writel_relaxed(0x0, gpc_base + GPC_SLOT0_CFG + 6 * 0x4); + writel_relaxed(0x0, gpc_base + GPC_SLOT0_CFG + 7 * 0x4); + writel_relaxed(0x0, gpc_base + GPC_SLOT0_CFG + 8 * 0x4); + writel_relaxed(BM_GPC_PGC_ACK_SEL_A7_DUMMY_PUP_ACK | + BM_GPC_PGC_ACK_SEL_A7_DUMMY_PDN_ACK, + gpc_base + GPC_PGC_ACK_SEL_A7); + imx_gpcv2_enable_rbc(false); + } + spin_unlock_irqrestore(&gpcv2_lock, flags); +} + +void imx_gpcv2_set_mix_phy_gate_by_lpm(u32 pdn_index, u32 pup_index) +{ + /* set power down slot */ + writel_relaxed(1 << (FAST_MEGA_MIX * 2), + gpc_base + GPC_SLOT0_CFG + pdn_index * 4); + + /* set power up slot */ + writel_relaxed(1 << (FAST_MEGA_MIX * 2 + 1), + gpc_base + GPC_SLOT0_CFG + pup_index * 4); +} + +unsigned int imx_gpcv2_is_mf_mix_off(void) +{ + return readl_relaxed(gpc_base + GPC_PGC_FM); +} + +static void imx_gpcv2_mf_mix_off(void) +{ + int i; + + for (i = 0; i < IMR_NUM; i++) + if 
(((gpcv2_wake_irqs[i] | gpcv2_mf_request_on[i]) & + gpcv2_mf_irqs[i]) != 0) + return; + + pr_info("Turn off Mega/Fast mix in DSM\n"); + imx_gpcv2_set_slot_ack(1, FAST_MEGA_MIX, false, false); + imx_gpcv2_set_slot_ack(5, FAST_MEGA_MIX, true, false); + imx_gpcv2_set_m_core_pgc(true, GPC_PGC_FM); +} + +int imx_gpcv2_mf_power_on(unsigned int irq, unsigned int on) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long hwirq = desc->irq_data.hwirq; + unsigned int idx = hwirq / 32; + unsigned long flags; + u32 mask = 1 << (hwirq % 32); + + BUG_ON(idx >= IMR_NUM); + + spin_lock_irqsave(&gpcv2_lock, flags); + gpcv2_mf_request_on[idx] = on ? gpcv2_mf_request_on[idx] | mask : + gpcv2_mf_request_on[idx] & ~mask; + spin_unlock_irqrestore(&gpcv2_lock, flags); + + return 0; +} + +void imx_gpcv2_enable_rbc(bool enable) +{ + u32 val; + + /* + * need to mask all interrupts in GPC before + * operating RBC configurations + */ + imx_gpcv2_mask_all(); + + /* configure RBC enable bit */ + val = readl_relaxed(gpc_base + GPC_SLPCR); + val &= ~BM_SLPCR_RBC_EN; + val |= enable ? BM_SLPCR_RBC_EN : 0; + writel_relaxed(val, gpc_base + GPC_SLPCR); + + /* configure RBC count */ + val = readl_relaxed(gpc_base + GPC_SLPCR); + val &= ~BM_SLPCR_REG_BYPASS_COUNT; + val |= enable ? BM_SLPCR_REG_BYPASS_COUNT : 0; + writel(val, gpc_base + GPC_SLPCR); + + /* + * need to delay at least 2 cycles of CKIL(32K) + * due to hardware design requirement, which is + * ~61us, here we use 65us for safe + */ + udelay(65); + + /* restore GPC interrupt mask settings */ + imx_gpcv2_restore_all(); +} + + +void imx_gpcv2_pre_suspend(bool arm_power_off) +{ + void __iomem *reg_imr1 = gpc_base + GPC_IMR1_CORE0; + int i; + + if (arm_power_off) { + imx_gpcv2_set_lpm_mode(STOP_POWER_OFF); + /* enable core0 power down/up with low power mode */ + imx_gpcv2_set_cpu_power_gate_by_lpm(0, true); + /* enable plat power down with low power mode */ + imx_gpcv2_set_plat_power_gate_by_lpm(true); + + /* + * To avoid confuse, we use slot 0~4 for power down, + * slot 5~9 for power up. 
+	 *
+	 * Power down slot sequence:
+	 * Slot0 -> CORE0
+	 * Slot1 -> Mega/Fast MIX
+	 * Slot2 -> SCU
+	 *
+	 * Power up slot sequence:
+	 * Slot5 -> Mega/Fast MIX
+	 * Slot6 -> SCU
+	 * Slot7 -> CORE0
+	 */
+		imx_gpcv2_set_slot_ack(0, CORE0_A7, false, false);
+		imx_gpcv2_set_slot_ack(2, SCU_A7, false, true);
+
+		if ((!imx_src_is_m4_enabled()) ||
+			(imx_src_is_m4_enabled() && imx_mu_is_m4_in_stop()))
+			imx_gpcv2_mf_mix_off();
+
+		imx_gpcv2_set_slot_ack(6, SCU_A7, true, false);
+		imx_gpcv2_set_slot_ack(6, CORE0_A7, true, true);
+
+		/* enable core0, scu */
+		imx_gpcv2_set_m_core_pgc(true, GPC_PGC_C0);
+		imx_gpcv2_set_m_core_pgc(true, GPC_PGC_SCU);
+	} else {
+		imx_gpcv2_set_lpm_mode(STOP_POWER_ON);
+	}
+
+	for (i = 0; i < IMR_NUM; i++) {
+		gpcv2_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4);
+		writel_relaxed(~gpcv2_wake_irqs[i], reg_imr1 + i * 4);
+	}
+}
+
+void imx_gpcv2_enable_wakeup_for_m4(void)
+{
+	void __iomem *reg_imr2 = gpc_base + GPC_IMR1_M4;
+	u32 i;
+
+	for (i = 0; i < IMR_NUM; i++) {
+		gpcv2_saved_imrs_m4[i] = readl_relaxed(reg_imr2 + i * 4);
+		writel_relaxed(~gpcv2_wake_irqs[i], reg_imr2 + i * 4);
+	}
+}
+
+void imx_gpcv2_disable_wakeup_for_m4(void)
+{
+	void __iomem *reg_imr2 = gpc_base + GPC_IMR1_M4;
+	u32 i;
+
+	for (i = 0; i < IMR_NUM; i++)
+		writel_relaxed(gpcv2_saved_imrs_m4[i], reg_imr2 + i * 4);
+}
+
+void imx_gpcv2_post_resume(void)
+{
+	void __iomem *reg_imr1 = gpc_base + GPC_IMR1_CORE0;
+	int i, val;
+
+	/* only external IRQs to wake up LPM and core 0/1 */
+	val = readl_relaxed(gpc_base + GPC_LPCR_A7_BSC);
+	val |= BM_LPCR_A7_BSC_IRQ_SRC_A7_WAKEUP;
+	writel_relaxed(val, gpc_base + GPC_LPCR_A7_BSC);
+	/* mask m4 dsm trigger if M4 NOT enabled */
+	if (!imx_src_is_m4_enabled())
+		writel_relaxed(readl_relaxed(gpc_base + GPC_LPCR_M4) |
+			BM_LPCR_M4_MASK_DSM_TRIGGER, gpc_base + GPC_LPCR_M4);
+	/* set mega/fast mix in A7 domain */
+	writel_relaxed(0x1, gpc_base + GPC_PGC_CPU_MAPPING);
+	/* set SCU timing */
+	writel_relaxed((0x59 << 10) | 0x5B | (0x2 << 20),
+		gpc_base + GPC_PGC_SCU_TIMING);
+
+	/* set C0/C1 power up timing per design requirement */
+	val = readl_relaxed(gpc_base + GPC_PGC_C0_PUPSCR);
+	val &= ~BM_GPC_PGC_CORE_PUPSCR;
+	val |= (0x1A << 7);
+	writel_relaxed(val, gpc_base + GPC_PGC_C0_PUPSCR);
+
+	val = readl_relaxed(gpc_base + GPC_PGC_C1_PUPSCR);
+	val &= ~BM_GPC_PGC_CORE_PUPSCR;
+	val |= (0x1A << 7);
+	writel_relaxed(val, gpc_base + GPC_PGC_C1_PUPSCR);
+
+	val = readl_relaxed(gpc_base + GPC_SLPCR);
+	val &= ~(BM_SLPCR_EN_DSM);
+	if (!imx_src_is_m4_enabled())
+		val &= ~(BM_SLPCR_VSTBY | BM_SLPCR_RBC_EN |
+			BM_SLPCR_SBYOS | BM_SLPCR_BYPASS_PMIC_READY);
+	val |= BM_SLPCR_EN_A7_FASTWUP_WAIT_MODE;
+	writel_relaxed(val, gpc_base + GPC_SLPCR);
+
+	if (imx_get_soc_revision() == IMX_CHIP_REVISION_1_0) {
+		/* disable memory low power mode */
+		val = readl_relaxed(gpc_base + GPC_MLPCR);
+		val |= BM_GPC_MLPCR_MEMLP_CTL_DIS;
+		writel_relaxed(val, gpc_base + GPC_MLPCR);
+	}
+
+	for (i = 0; i < IMR_NUM; i++)
+		writel_relaxed(gpcv2_saved_imrs[i], reg_imr1 + i * 4);
+
+	imx_gpcv2_set_lpm_mode(WAIT_CLOCKED);
+	imx_gpcv2_set_cpu_power_gate_by_lpm(0, false);
+	imx_gpcv2_set_plat_power_gate_by_lpm(false);
+
+	imx_gpcv2_set_m_core_pgc(false, GPC_PGC_C0);
+	imx_gpcv2_set_m_core_pgc(false, GPC_PGC_SCU);
+	imx_gpcv2_set_m_core_pgc(false, GPC_PGC_FM);
+	for (i = 0; i < MAX_SLOT_NUMBER; i++) {
+		if (i == 1 || i == 5) /* skip slots the M4 uses */
+			continue;
+		writel_relaxed(0x0, gpc_base + GPC_SLOT0_CFG + i * 0x4);
+	}
+	writel_relaxed(BM_GPC_PGC_ACK_SEL_A7_DUMMY_PUP_ACK |
BM_GPC_PGC_ACK_SEL_A7_DUMMY_PDN_ACK, + gpc_base + GPC_PGC_ACK_SEL_A7); + + /* disable RBC */ + imx_gpcv2_enable_rbc(false); +} + +static struct irq_chip imx_gpcv2_chip = { + .name = "GPCV2", + .irq_eoi = irq_chip_eoi_parent, + .irq_mask = imx_gpcv2_irq_mask, + .irq_unmask = imx_gpcv2_irq_unmask, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_wake = imx_gpcv2_irq_set_wake, +#ifdef CONFIG_SMP + .irq_set_affinity = irq_chip_set_affinity_parent, +#endif +}; + +static int imx_gpcv2_domain_xlate(struct irq_domain *domain, + struct device_node *controller, + const u32 *intspec, + unsigned int intsize, + unsigned long *out_hwirq, + unsigned int *out_type) +{ + if (irq_domain_get_of_node(domain) != controller) + return -EINVAL; /* Shouldn't happen, really... */ + if (intsize != 3) + return -EINVAL; /* Not GIC compliant */ + if (intspec[0] != 0) + return -EINVAL; /* No PPI should point to this domain */ + + *out_hwirq = intspec[1]; + *out_type = intspec[2]; + return 0; +} + +static int imx_gpcv2_domain_alloc(struct irq_domain *domain, + unsigned int irq, + unsigned int nr_irqs, void *data) +{ + struct irq_fwspec *fwspec = data; + struct irq_fwspec parent_fwspec; + irq_hw_number_t hwirq; + int i; + + if (fwspec->param_count != 3) + return -EINVAL; /* Not GIC compliant */ + if (fwspec->param[0] != 0) + return -EINVAL; /* No PPI should point to this domain */ + + hwirq = fwspec->param[1]; + if (hwirq >= GPC_MAX_IRQS) + return -EINVAL; /* Can't deal with this */ + + for (i = 0; i < nr_irqs; i++) + irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i, + &imx_gpcv2_chip, NULL); + + parent_fwspec.fwnode = domain->parent->fwnode; + parent_fwspec.param_count = 3; + parent_fwspec.param[0] = 0; + parent_fwspec.param[1] = hwirq; + parent_fwspec.param[2] = fwspec->param[2]; + + return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, + &parent_fwspec); +} + +static struct irq_domain_ops imx_gpcv2_domain_ops = { + .xlate = imx_gpcv2_domain_xlate, + .alloc = imx_gpcv2_domain_alloc, + .free = irq_domain_free_irqs_common, +}; + +static int __init imx_gpcv2_init(struct device_node *node, + struct device_node *parent) +{ + struct irq_domain *parent_domain, *domain; + int i, val; + + if (!parent) { + pr_err("%s: no parent, giving up\n", node->full_name); + return -ENODEV; + } + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + pr_err("%s: unable to obtain parent domain\n", node->full_name); + return -ENXIO; + } + + gpc_base = of_iomap(node, 0); + if (WARN_ON(!gpc_base)) + return -ENOMEM; + + domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS, + node, &imx_gpcv2_domain_ops, + NULL); + if (!domain) { + iounmap(gpc_base); + return -ENOMEM; + } + + /* Initially mask all interrupts */ + for (i = 0; i < IMR_NUM; i++) { + writel_relaxed(~0, gpc_base + GPC_IMR1_CORE0 + i * 4); + writel_relaxed(~0, gpc_base + GPC_IMR1_CORE1 + i * 4); + } + /* + * Due to hardware design requirement, need to make sure GPR + * interrupt(#32) is unmasked during RUN mode to avoid entering + * DSM by mistake. 
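+	 * The ~0x1 write below masks every interrupt in IMR1_CORE0 except
+	 * bit 0, i.e. GIC SPI 32 is left unmasked.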
+ */ + writel_relaxed(~0x1, gpc_base + GPC_IMR1_CORE0); + + /* Read supported wakeup source in M/F domain */ + if (cpu_is_imx7d()) { + of_property_read_u32_index(node, "fsl,mf-mix-wakeup-irq", 0, + &gpcv2_mf_irqs[0]); + of_property_read_u32_index(node, "fsl,mf-mix-wakeup-irq", 1, + &gpcv2_mf_irqs[1]); + of_property_read_u32_index(node, "fsl,mf-mix-wakeup-irq", 2, + &gpcv2_mf_irqs[2]); + of_property_read_u32_index(node, "fsl,mf-mix-wakeup-irq", 3, + &gpcv2_mf_irqs[3]); + if (!(gpcv2_mf_irqs[0] | gpcv2_mf_irqs[1] | + gpcv2_mf_irqs[2] | gpcv2_mf_irqs[3])) + pr_info("No wakeup source in Mega/Fast domain found!\n"); + } + + /* only external IRQs to wake up LPM and core 0/1 */ + val = readl_relaxed(gpc_base + GPC_LPCR_A7_BSC); + val |= BM_LPCR_A7_BSC_IRQ_SRC_A7_WAKEUP; + writel_relaxed(val, gpc_base + GPC_LPCR_A7_BSC); + /* mask m4 dsm trigger if M4 NOT enabled */ + if (!imx_src_is_m4_enabled()) + writel_relaxed(readl_relaxed(gpc_base + GPC_LPCR_M4) | + BM_LPCR_M4_MASK_DSM_TRIGGER, gpc_base + GPC_LPCR_M4); + /* set mega/fast mix in A7 domain */ + writel_relaxed(0x1, gpc_base + GPC_PGC_CPU_MAPPING); + /* set SCU timing */ + writel_relaxed((0x59 << 10) | 0x5B | (0x2 << 20), + gpc_base + GPC_PGC_SCU_TIMING); + + /* set C0/C1 power up timming per design requirement */ + val = readl_relaxed(gpc_base + GPC_PGC_C0_PUPSCR); + val &= ~BM_GPC_PGC_CORE_PUPSCR; + val |= (0x1A << 7); + writel_relaxed(val, gpc_base + GPC_PGC_C0_PUPSCR); + + val = readl_relaxed(gpc_base + GPC_PGC_C1_PUPSCR); + val &= ~BM_GPC_PGC_CORE_PUPSCR; + val |= (0x1A << 7); + writel_relaxed(val, gpc_base + GPC_PGC_C1_PUPSCR); + + writel_relaxed(BM_GPC_PGC_ACK_SEL_A7_DUMMY_PUP_ACK | + BM_GPC_PGC_ACK_SEL_A7_DUMMY_PDN_ACK, + gpc_base + GPC_PGC_ACK_SEL_A7); + + val = readl_relaxed(gpc_base + GPC_SLPCR); + val &= ~(BM_SLPCR_EN_DSM); + if (!imx_src_is_m4_enabled()) + val &= ~(BM_SLPCR_VSTBY | BM_SLPCR_RBC_EN | + BM_SLPCR_SBYOS | BM_SLPCR_BYPASS_PMIC_READY); + val |= BM_SLPCR_EN_A7_FASTWUP_WAIT_MODE; + writel_relaxed(val, gpc_base + GPC_SLPCR); + + if (imx_get_soc_revision() == IMX_CHIP_REVISION_1_0) { + /* disable memory low power mode */ + val = readl_relaxed(gpc_base + GPC_MLPCR); + val |= BM_GPC_MLPCR_MEMLP_CTL_DIS; + writel_relaxed(val, gpc_base + GPC_MLPCR); + } + + /* disable RBC */ + imx_gpcv2_enable_rbc(false); + + /* + * Clear the OF_POPULATED flag set in of_irq_init so that + * later the GPC power domain driver will not be skipped. + */ + of_node_clear_flag(node, OF_POPULATED); + + return 0; +} + +/* + * We cannot use the IRQCHIP_DECLARE macro that lives in + * drivers/irqchip, so we're forced to roll our own. Not very nice. 
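+ * OF_DECLARE_2(irqchip, ...) below creates the same irqchip table entry
+ * that IRQCHIP_DECLARE(imx_gpcv2, "fsl,imx7d-gpc", imx_gpcv2_init) would.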
+ */ +OF_DECLARE_2(irqchip, imx_gpcv2, "fsl,imx7d-gpc", imx_gpcv2_init); + +void __init imx_gpcv2_check_dt(void) +{ + struct device_node *np; + + np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-gpc"); + if (WARN_ON(!np)) + return; + + if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) { + pr_warn("Outdated DT detected, suspend/resume will NOT work\n"); + + /* map GPC, so that at least CPUidle and WARs keep working */ + gpc_base = of_iomap(np, 0); + } +} diff --git a/arch/arm/mach-imx/headsmp.S b/arch/arm/mach-imx/headsmp.S index 766dbdb2ae27..c19cfc3166ae 100644 --- a/arch/arm/mach-imx/headsmp.S +++ b/arch/arm/mach-imx/headsmp.S @@ -21,6 +21,17 @@ diag_reg_offset: ENTRY(v7_secondary_startup) ARM_BE8(setend be) @ go BE8 if entered LE + mrc p15, 0, r0, c0, c0, 0 + ldr r1, =0xf00 + orr r1, r1, #0xff + mov r0, r0, lsr #4 + and r0, r0, r1 + /* 0xc07 is cortex A7's ID */ + ldr r1, =0xc00 + orr r1, r1, #0x7 + cmp r0, r1 + beq secondary_startup + set_diag_reg b secondary_startup ENDPROC(v7_secondary_startup) diff --git a/arch/arm/mach-imx/imx7d_low_power_idle.S b/arch/arm/mach-imx/imx7d_low_power_idle.S new file mode 100644 index 000000000000..85c229700ec4 --- /dev/null +++ b/arch/arm/mach-imx/imx7d_low_power_idle.S @@ -0,0 +1,788 @@ +/* + * Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include + +#define PM_INFO_VBASE_OFFSET 0x0 +#define PM_INFO_PBASE_OFFSET 0x4 +#define PM_INFO_RESUME_ADDR_OFFSET 0x8 +#define PM_INFO_PM_INFO_SIZE_OFFSET 0xc +#define PM_INFO_PM_INFO_TTBR_OFFSET 0x10 +#define PM_INFO_PM_INFO_NUM_ONLINE_CPUS_OFFSET 0x14 +#define PM_INFO_PM_INFO_NUM_LPI_CPUS_OFFSET 0x18 +#define PM_INFO_VAL_OFFSET 0x1c +#define PM_INFO_FLAG0_OFFSET 0x20 +#define PM_INFO_FLAG1_OFFSET 0x24 +#define PM_INFO_MX7D_DDRC_P_OFFSET 0x28 +#define PM_INFO_MX7D_DDRC_V_OFFSET 0x2c +#define PM_INFO_MX7D_CCM_P_OFFSET 0x30 +#define PM_INFO_MX7D_CCM_V_OFFSET 0x34 +#define PM_INFO_MX7D_ANATOP_P_OFFSET 0x38 +#define PM_INFO_MX7D_ANATOP_V_OFFSET 0x3c +#define PM_INFO_MX7D_SRC_P_OFFSET 0x40 +#define PM_INFO_MX7D_SRC_V_OFFSET 0x44 +#define PM_INFO_MX7D_IOMUXC_GPR_P_OFFSET 0x48 +#define PM_INFO_MX7D_IOMUXC_GPR_V_OFFSET 0x4c +#define PM_INFO_MX7D_GPC_P_OFFSET 0x50 +#define PM_INFO_MX7D_GPC_V_OFFSET 0x54 +#define PM_INFO_MX7D_GIC_DIST_P_OFFSET 0x58 +#define PM_INFO_MX7D_GIC_DIST_V_OFFSET 0x5c + +#define MX7D_SRC_GPR1 0x74 +#define MX7D_SRC_GPR2 0x78 +#define MX7D_SRC_GPR3 0x7c +#define MX7D_SRC_GPR4 0x80 +#define MX7D_GPC_IMR1 0x30 +#define MX7D_GPC_IMR2 0x34 +#define MX7D_GPC_IMR3 0x38 +#define MX7D_GPC_IMR4 0x3c +#define DDRC_STAT 0x4 +#define DDRC_PWRCTL 0x30 +#define DDRC_DBG1 0x304 +#define DDRC_DBGCAM 0x308 +#define DDRC_PSTAT 0x3fc +#define DDRC_PCTRL_0 0x490 + +/* + * imx_pen_lock + * + * The reference link of Peterson's algorithm: + * http://en.wikipedia.org/wiki/Peterson's_algorithm + * + * val1 = r1 = !turn (inverted from Peterson's algorithm) + * on cpu 0: + * r2 = flag[0] (in flag0) + * r3 = flag[1] (in flag1) + * on cpu1: + * r2 = flag[1] (in flag1) + * r3 = flag[0] (in flag0) + * + */ + .macro imx_pen_lock + + mov r8, r0 + mrc p15, 0, r5, c0, c0, 5 + and r5, r5, #3 + add r6, r8, #PM_INFO_VAL_OFFSET + cmp r5, #0 + addeq r7, r8, #PM_INFO_FLAG0_OFFSET + addeq r8, r8, #PM_INFO_FLAG1_OFFSET + addne r7, r8, #PM_INFO_FLAG1_OFFSET + addne r8, r8, #PM_INFO_FLAG0_OFFSET + + mov r9, #1 + str r9, [r7] + dsb + str r5, [r6] +1: + dsb + ldr r9, [r8] + cmp r9, #1 + ldreq r9, [r6] + cmpeq r9, r5 + beq 1b + + .endm + + .macro imx_pen_unlock + + dsb + mrc p15, 0, r6, c0, c0, 5 + and r6, r6, #3 + cmp r6, #0 + addeq r7, r0, #PM_INFO_FLAG0_OFFSET + addne r7, r0, #PM_INFO_FLAG1_OFFSET + mov r9, #0 + str r9, [r7] + + .endm + + .macro disable_l1_dcache + + push {r0 - r12, lr} + ldr r7, =v7_flush_dcache_all + mov lr, pc + mov pc, r7 + pop {r0 - r12, lr} + + /* disable d-cache */ + mrc p15, 0, r7, c1, c0, 0 + bic r7, r7, #(1 << 2) + mcr p15, 0, r7, c1, c0, 0 + dsb + isb + + push {r0 - r12, lr} + ldr r7, =v7_flush_dcache_all + mov lr, pc + mov pc, r7 + pop {r0 - r12, lr} + +#ifdef CONFIG_SMP + clrex + + /* Turn off SMP bit. */ + mrc p15, 0, r8, c1, c0, 1 + bic r8, r8, #0x40 + mcr p15, 0, r8, c1, c0, 1 + + isb + dsb +#endif + + .endm + + .macro tlb_set_to_ocram + + /* save ttbr */ + mrc p15, 0, r7, c2, c0, 1 + str r7, [r0, #PM_INFO_PM_INFO_TTBR_OFFSET] + + /* + * To ensure no page table walks occur in DDR, we + * have a another page table stored in IRAM that only + * contains entries pointing to IRAM, AIPS1 and AIPS2. + * We need to set the TTBR1 to the new IRAM TLB. + * Do the following steps: + * 1. Flush the Branch Target Address Cache (BTAC) + * 2. Set TTBR1 to point to IRAM page table. + * 3. Disable page table walks in TTBR0 (PD0 = 1) + * 4. Set TTBR0.N=1, implying 0-2G is translated by TTBR0 + * and 2-4G is translated by TTBR1. + */ + + /* Disable Branch Prediction, Z bit in SCTLR. 
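+	 * (It is re-enabled in tlb_back_to_ddr once the page tables in
+	 * DDR are switched back in.)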
*/ + mrc p15, 0, r6, c1, c0, 0 + bic r6, r6, #0x800 + mcr p15, 0, r6, c1, c0, 0 + + /* Flush the BTAC. */ + ldr r6, =0x0 + mcr p15, 0, r6, c7, c1, 6 + + ldr r6, =iram_tlb_phys_addr + ldr r7, [r6] + + dsb + isb + + /* Store the IRAM table in TTBR1 */ + mcr p15, 0, r7, c2, c0, 1 + + /* Read TTBCR and set PD0=1, N = 1 */ + mrc p15, 0, r6, c2, c0, 2 + orr r6, r6, #0x11 + mcr p15, 0, r6, c2, c0, 2 + + dsb + isb + + /* flush the TLB */ + ldr r6, =0x0 + mcr p15, 0, r6, c8, c3, 0 + + .endm + + .macro tlb_back_to_ddr + + /* Read TTBCR and set PD0=0, N = 0 */ + mrc p15, 0, r6, c2, c0, 2 + bic r6, r6, #0x11 + mcr p15, 0, r6, c2, c0, 2 + + dsb + isb + + /* flush the TLB */ + ldr r6, =0x0 + mcr p15, 0, r6, c8, c3, 0 + + /* Enable Branch Prediction, Z bit in SCTLR. */ + mrc p15, 0, r6, c1, c0, 0 + orr r6, r6, #0x800 + mcr p15, 0, r6, c1, c0, 0 + + /* Flush the Branch Target Address Cache (BTAC) */ + ldr r6, =0x0 + mcr p15, 0, r6, c7, c1, 6 + + /* restore ttbr */ + ldr r7, [r0, #PM_INFO_PM_INFO_TTBR_OFFSET] + mcr p15, 0, r7, c2, c0, 1 + + .endm + + /* r10 must be DDRC base address */ + .macro ddrc_enter_self_refresh + + ldr r10, [r0, #PM_INFO_MX7D_DDRC_V_OFFSET] + + /* disable port */ + ldr r7, =0x0 + str r7, [r10, #DDRC_PCTRL_0] + + /* let DDR out of self-refresh */ + ldr r7, =0x0 + str r7, [r10, #DDRC_PWRCTL] + + /* wait rw port_busy clear */ + ldr r6, =(0x1 << 16) + orr r6, r6, #0x1 +2: + ldr r7, [r10, #DDRC_PSTAT] + ands r7, r7, r6 + bne 2b + + ldr r7, =0x1 + str r7, [r10, #DDRC_DBG1] + + ldr r6, =0x36000000 +11: + ldr r7, [r10, #DDRC_DBGCAM] + and r7, r7, r6 + cmp r7, r6 + bne 11b + + /* enter self-refresh bit 5 */ + ldr r7, =(0x1 << 5) + str r7, [r10, #DDRC_PWRCTL] + + /* wait until self-refresh mode entered */ +3: + ldr r7, [r10, #DDRC_STAT] + and r7, r7, #0x3 + cmp r7, #0x3 + bne 3b +4: + ldr r7, [r10, #DDRC_STAT] + ands r7, r7, #0x20 + beq 4b + + /* disable dram clk */ + ldr r7, [r10, #DDRC_PWRCTL] + orr r7, r7, #(1 << 3) + str r7, [r10, #DDRC_PWRCTL] + + /* + * TO1.1 adds feature of DDR pads power down, + * although TO1.0 has no such function, but it is + * NOT harmful to program GPR registers for TO1.0, + * it can avoid the logic of version check in idle + * thread. 
+ */ + ldr r10, [r0, #PM_INFO_MX7D_IOMUXC_GPR_V_OFFSET] + ldr r7, =0xf0000 + str r7, [r10] + + /* delay 20us, measured by gpio */ + ldr r7, =20 +12: + subs r7, r7, #0x1 + bne 12b + + .endm + + /* r10 must be DDRC base address */ + .macro ddrc_exit_self_refresh + + cmp r5, #0x1 + ldreq r10, [r0, #PM_INFO_MX7D_IOMUXC_GPR_P_OFFSET] + ldrne r10, [r0, #PM_INFO_MX7D_IOMUXC_GPR_V_OFFSET] + + ldr r7, =0x0 + str r7, [r10] + + ldr r7, =20 +13: + subs r7, r7, #0x1 + bne 13b + + cmp r5, #0x1 + ldreq r10, [r0, #PM_INFO_MX7D_DDRC_P_OFFSET] + ldrne r10, [r0, #PM_INFO_MX7D_DDRC_V_OFFSET] + + ldr r7, =0x0 + str r7, [r10, #DDRC_DBG1] + + ldr r6, =0x30000000 +14: + ldr r7, [r10, #DDRC_DBGCAM] + and r7, r7, r6 + cmp r7, r6 + bne 14b + + /* let DDR out of self-refresh */ + ldr r7, =0x0 + str r7, [r10, #DDRC_PWRCTL] + + /* wait until self-refresh mode exited */ +5: + ldr r7, [r10, #DDRC_STAT] + and r7, r7, #0x3 + cmp r7, #0x3 + beq 5b + + /* enable auto self-refresh */ + ldr r7, [r10, #DDRC_PWRCTL] + orr r7, r7, #(1 << 0) + str r7, [r10, #DDRC_PWRCTL] + + ldr r7, =0x1 + str r7, [r10, #DDRC_PCTRL_0] + + .endm + + .macro pll_do_wait_lock +6: + ldr r7, [r10, r8] + ands r7, #0x80000000 + beq 6b + + .endm + + .macro ccm_enter_idle + + ldr r10, [r0, #PM_INFO_MX7D_ANATOP_V_OFFSET] + + /* ungate pfd1 332m for lower axi */ + ldr r7, =0x8000 + str r7, [r10, #0xc8] + + ldr r10, [r0, #PM_INFO_MX7D_CCM_V_OFFSET] + + /* switch ARM CLK to OSC */ + ldr r8, =0x8000 + ldr r7, [r10, r8] + bic r7, r7, #0x7000000 + str r7, [r10, r8] + + /* lower AXI clk from 24MHz to 3MHz */ + ldr r8, =0x8800 + ldr r7, [r10, r8] + orr r7, r7, #0x7 + str r7, [r10, r8] + + /* lower AHB clk from 24MHz to 3MHz */ + ldr r8, =0x9000 + ldr r7, [r10, r8] + orr r7, r7, #0x7 + str r7, [r10, r8] + + /* gate dram clk */ + ldr r8, =0x9880 + ldr r7, [r10, r8] + bic r7, r7, #0x10000000 + str r7, [r10, r8] + + ldr r10, [r0, #PM_INFO_MX7D_ANATOP_V_OFFSET] + + /* gate pfd1 332m */ + ldr r7, =0x8000 + str r7, [r10, #0xc4] + + /* gate system pll pfd div 1 */ + ldr r7, =0x10 + str r7, [r10, #0xb4] + /* power down ARM, 480 and DRAM PLL */ + ldr r7, =0x1000 + str r7, [r10, #0x64] + str r7, [r10, #0xb4] + ldr r7, =0x100000 + str r7, [r10, #0x74] + + .endm + + .macro ccm_exit_idle + + cmp r5, #0x1 + ldreq r10, [r0, #PM_INFO_MX7D_ANATOP_P_OFFSET] + ldrne r10, [r0, #PM_INFO_MX7D_ANATOP_V_OFFSET] + + /* power up ARM, 480 and DRAM PLL */ + ldr r7, =0x1000 + str r7, [r10, #0x68] + ldr r8, =0x60 + pll_do_wait_lock + + ldr r7, =0x1000 + str r7, [r10, #0xb8] + ldr r8, =0xb0 + pll_do_wait_lock + + ldr r7, =0x100000 + str r7, [r10, #0x78] + ldr r8, =0x70 + pll_do_wait_lock + + /* ungate pfd1 332m for lower axi */ + ldr r7, =0x8000 + str r7, [r10, #0xc8] + + /* ungate system pll pfd div 1 */ + ldr r7, =0x10 + str r7, [r10, #0xb8] + + cmp r5, #0x1 + ldreq r10, [r0, #PM_INFO_MX7D_CCM_P_OFFSET] + ldrne r10, [r0, #PM_INFO_MX7D_CCM_V_OFFSET] + + /* switch ARM CLK to PLL */ + ldr r8, =0x8000 + ldr r7, [r10, r8] + orr r7, r7, #0x1000000 + str r7, [r10, r8] + + /* restore AXI clk from 3MHz to 24MHz */ + ldr r8, =0x8800 + ldr r7, [r10, r8] + bic r7, r7, #0x7 + str r7, [r10, r8] + + /* restore AHB clk from 3MHz to 24MHz */ + ldr r8, =0x9000 + ldr r7, [r10, r8] + bic r7, r7, #0x7 + str r7, [r10, r8] + + /* ungate dram clk */ + ldr r8, =0x9880 + ldr r7, [r10, r8] + orr r7, r7, #0x10000000 + str r7, [r10, r8] + + cmp r5, #0x1 + ldreq r10, [r0, #PM_INFO_MX7D_ANATOP_P_OFFSET] + ldrne r10, [r0, #PM_INFO_MX7D_ANATOP_V_OFFSET] + + /* gate pfd1 332m for lower axi */ + ldr r7, =0x8000 + str r7, [r10, 
#0xc4] + + .endm + + .macro anatop_enter_idle + + ldr r10, [r0, #PM_INFO_MX7D_ANATOP_V_OFFSET] + + /* XTAL to RC-OSC switch */ + ldr r7, [r10] + orr r7, r7, #0x1000 + str r7, [r10] + /* power down XTAL */ + ldr r7, [r10] + orr r7, r7, #0x1 + str r7, [r10] + + /* enable weak 1P0A */ + ldr r7, [r10, #0x200] + orr r7, r7, #0x40000 + str r7, [r10, #0x200] + + /* disable LDO 1P0A */ + ldr r7, [r10, #0x200] + bic r7, r7, #0x1 + str r7, [r10, #0x200] + + /* disable LDO 1P0D */ + ldr r7, [r10, #0x210] + bic r7, r7, #0x1 + str r7, [r10, #0x210] + + /* disable LDO 1P2 */ + ldr r7, [r10, #0x220] + bic r7, r7, #0x1 + str r7, [r10, #0x220] + + /* switch to low power bandgap */ + ldr r7, [r10, #0x270] + orr r7, r7, #0x400 + str r7, [r10, #0x270] + /* power down normal bandgap */ + orr r7, r7, #0x1 + str r7, [r10, #0x270] + + .endm + + .macro anatop_exit_idle + + cmp r5, #0x1 + ldreq r10, [r0, #PM_INFO_MX7D_ANATOP_P_OFFSET] + ldrne r10, [r0, #PM_INFO_MX7D_ANATOP_V_OFFSET] + + /* power on normal bandgap */ + ldr r7, [r10, #0x270] + bic r7, r7, #0x1 + str r7, [r10, #0x270] + /* switch to normal bandgap */ + bic r7, r7, #0x400 + str r7, [r10, #0x270] + + /* enable LDO 1P2 */ + ldr r7, [r10, #0x220] + orr r7, r7, #0x1 + str r7, [r10, #0x220] +7: + ldr r7, [r10, #0x220] + ands r7, #0x20000 + beq 7b + + /* enable LDO 1P0D */ + ldr r7, [r10, #0x210] + orr r7, r7, #0x1 + str r7, [r10, #0x210] +8: + ldr r7, [r10, #0x210] + ands r7, #0x20000 + beq 8b + + /* enable LDO 1P0A */ + ldr r7, [r10, #0x200] + orr r7, r7, #0x1 + str r7, [r10, #0x200] +9: + ldr r7, [r10, #0x200] + ands r7, #0x20000 + beq 9b + /* disable weak 1P0A */ + ldr r7, [r10, #0x200] + bic r7, r7, #0x40000 + str r7, [r10, #0x200] + + /* power up XTAL and wait */ + ldr r7, [r10] + bic r7, r7, #0x1 + str r7, [r10] +10: + ldr r7, [r10] + ands r7, r7, #0x4 + beq 10b + /* RC-OSC to XTAL switch */ + ldr r7, [r10] + bic r7, r7, #0x1000 + str r7, [r10] + + .endm + +.extern iram_tlb_phys_addr + + .align 3 +ENTRY(imx7d_low_power_idle) + push {r0 - r12} + + /* get necessary info from pm_info */ + ldr r1, [r0, #PM_INFO_PBASE_OFFSET] + ldr r2, [r0, #PM_INFO_PM_INFO_SIZE_OFFSET] + + /* + * counting the resume address in iram + * to set it in SRC register. + */ + ldr r5, =imx7d_low_power_idle + ldr r6, =wakeup + sub r6, r6, r5 + add r8, r1, r2 + add r3, r8, r6 + + /* r11 is cpu id */ + mrc p15, 0, r11, c0, c0, 5 + and r11, r11, #3 + cmp r11, #0x0 + ldreq r6, =MX7D_SRC_GPR1 + ldreq r7, =MX7D_SRC_GPR2 + ldrne r6, =MX7D_SRC_GPR3 + ldrne r7, =MX7D_SRC_GPR4 + /* store physical resume addr and pm_info address. */ + ldr r10, [r0, #PM_INFO_MX7D_SRC_V_OFFSET] + str r3, [r10, r6] + str r1, [r10, r7] + + disable_l1_dcache + + tlb_set_to_ocram + + /* check last to sleep */ + ldr r6, [r0, #PM_INFO_PM_INFO_NUM_ONLINE_CPUS_OFFSET] + ldr r7, [r0, #PM_INFO_PM_INFO_NUM_LPI_CPUS_OFFSET] + cmp r6, r7 + bne lpi_enter_done + + ddrc_enter_self_refresh + ccm_enter_idle + anatop_enter_idle + + ldr r10, [r0, #PM_INFO_MX7D_GIC_DIST_V_OFFSET] + ldr r7, =0x0 + ldr r8, =0x1000 + str r7, [r10, r8] + + ldr r10, [r0, #PM_INFO_MX7D_GPC_V_OFFSET] + ldr r4, [r10, #MX7D_GPC_IMR1] + ldr r5, [r10, #MX7D_GPC_IMR2] + ldr r6, [r10, #MX7D_GPC_IMR3] + ldr r7, [r10, #MX7D_GPC_IMR4] + + ldr r8, =0xffffffff + str r8, [r10, #MX7D_GPC_IMR1] + str r8, [r10, #MX7D_GPC_IMR2] + str r8, [r10, #MX7D_GPC_IMR3] + str r8, [r10, #MX7D_GPC_IMR4] + + /* + * enable the RBC bypass counter here + * to hold off the interrupts. RBC counter + * = 8 (240us). 
With this setting, the latency + * from wakeup interrupt to ARM power up + * is ~250uS. + */ + ldr r8, [r10, #0x14] + bic r8, r8, #(0x3f << 24) + orr r8, r8, #(0x8 << 24) + str r8, [r10, #0x14] + + /* enable the counter. */ + ldr r8, [r10, #0x14] + orr r8, r8, #(0x1 << 30) + str r8, [r10, #0x14] + + /* unmask all the GPC interrupts. */ + str r4, [r10, #MX7D_GPC_IMR1] + str r5, [r10, #MX7D_GPC_IMR2] + str r6, [r10, #MX7D_GPC_IMR3] + str r7, [r10, #MX7D_GPC_IMR4] + + /* + * now delay for a short while (30usec) + * ARM is at 24MHz at this point + * so a short loop should be enough. + * this delay is required to ensure that + * the RBC counter can start counting in + * case an interrupt is already pending + * or in case an interrupt arrives just + * as ARM is about to assert DSM_request. + */ + ldr r4, =5 +rbc_loop: + subs r4, r4, #0x1 + bne rbc_loop + +lpi_enter_done: + + imx_pen_unlock + + wfi + + nop + nop + nop + nop + nop + + nop + nop + nop + nop + nop + + nop + nop + nop + nop + nop + + nop + nop + nop + nop + nop + + nop + nop + nop + nop + nop + + imx_pen_lock + + /* check first to wake */ + ldr r6, [r0, #PM_INFO_PM_INFO_NUM_ONLINE_CPUS_OFFSET] + ldr r7, [r0, #PM_INFO_PM_INFO_NUM_LPI_CPUS_OFFSET] + cmp r6, r7 + bne skip_lpi_flow + + ldr r5, =0x0 + anatop_exit_idle + ccm_exit_idle + ddrc_exit_self_refresh + + ldr r10, [r0, #PM_INFO_MX7D_GIC_DIST_V_OFFSET] + ldr r7, =0x1 + ldr r8, =0x1000 + str r7, [r10, r8] + +skip_lpi_flow: + tlb_back_to_ddr + +#ifdef CONFIG_SMP + /* Turn on SMP bit. */ + mrc p15, 0, r7, c1, c0, 1 + orr r7, r7, #0x40 + mcr p15, 0, r7, c1, c0, 1 + + isb +#endif + + /* enable d-cache */ + mrc p15, 0, r7, c1, c0, 0 + orr r7, r7, #(1 << 2) + mcr p15, 0, r7, c1, c0, 0 + dsb + isb + + /* Restore registers */ + pop {r0 - r12} + mov pc, lr + +wakeup: + + /* invalidate L1 I-cache first */ + mov r1, #0x0 + mcr p15, 0, r1, c7, c5, 0 + mcr p15, 0, r1, c7, c5, 0 + mcr p15, 0, r1, c7, c5, 6 + /* enable the Icache and branch prediction */ + mov r1, #0x1800 + mcr p15, 0, r1, c1, c0, 0 + isb + + imx_pen_lock + + /* check first to wake */ + ldr r6, [r0, #PM_INFO_PM_INFO_NUM_ONLINE_CPUS_OFFSET] + ldr r7, [r0, #PM_INFO_PM_INFO_NUM_LPI_CPUS_OFFSET] + cmp r6, r7 + bne wakeup_skip_lpi_flow + + ldr r5, =0x1 + anatop_exit_idle + ccm_exit_idle + ddrc_exit_self_refresh + +wakeup_skip_lpi_flow: + /* get physical resume address from pm_info. 
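+	 * (imx7d_cpuidle_init() stored virt_to_phys(ca7_cpu_resume) there,
+	 * so this jump continues through the normal resume path.)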
*/ + ldr lr, [r0, #PM_INFO_RESUME_ADDR_OFFSET] + + /* Restore registers */ + mov pc, lr + .ltorg +ENDPROC(imx7d_low_power_idle) diff --git a/arch/arm/mach-imx/mach-imx7d.c b/arch/arm/mach-imx/mach-imx7d.c index 6baead31ec29..c303ae3b0dfa 100644 --- a/arch/arm/mach-imx/mach-imx7d.c +++ b/arch/arm/mach-imx/mach-imx7d.c @@ -13,6 +13,13 @@ #include #include "common.h" +#include "cpuidle.h" + +static struct property device_disabled = { + .name = "status", + .length = sizeof("disabled"), + .value = "disabled", +}; static int ar8031_phy_fixup(struct phy_device *dev) { @@ -76,6 +83,17 @@ static inline void imx7d_enet_init(void) imx7d_enet_clk_sel(); } +static inline void imx7d_disable_arm_arch_timer(void) +{ + struct device_node *node; + + node = of_find_compatible_node(NULL, NULL, "arm,armv7-timer"); + if (node) { + pr_info("disable arm arch timer for nosmp!\n"); + of_add_property(node, &device_disabled); + } +} + static void __init imx7d_init_machine(void) { struct device *parent; @@ -85,20 +103,27 @@ static void __init imx7d_init_machine(void) pr_warn("failed to initialize soc device\n"); imx_anatop_init(); + of_platform_default_populate(NULL, NULL, parent); + imx7d_pm_init(); imx7d_enet_init(); } static void __init imx7d_init_late(void) { + imx7d_cpuidle_init(); if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT)) platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0); } static void __init imx7d_init_irq(void) { + imx_gpcv2_check_dt(); imx_init_revision_from_anatop(); imx_src_init(); irqchip_init(); +#ifndef CONFIG_SMP + imx7d_disable_arm_arch_timer(); +#endif } static void __init imx7d_map_io(void) @@ -116,6 +141,7 @@ static const char *const imx7d_dt_compat[] __initconst = { DT_MACHINE_START(IMX7D, "Freescale i.MX7 Dual (Device Tree)") .map_io = imx7d_map_io, + .smp = smp_ops(imx_smp_ops), .init_irq = imx7d_init_irq, .init_machine = imx7d_init_machine, .init_late = imx7d_init_late, diff --git a/arch/arm/mach-imx/platsmp.c b/arch/arm/mach-imx/platsmp.c index 7be31e7e91b3..90dccaa66c89 100644 --- a/arch/arm/mach-imx/platsmp.c +++ b/arch/arm/mach-imx/platsmp.c @@ -47,15 +47,39 @@ static int imx_boot_secondary(unsigned int cpu, struct task_struct *idle) return 0; } +#define MXC_ARCH_CA7 0xc07 +static unsigned long __mxc_arch_type; + +static inline bool arm_is_ca7(void) +{ + return __mxc_arch_type == MXC_ARCH_CA7; +} /* * Initialise the CPU possible map early - this describes the CPUs * which may be present or become present in the system. 
*/ static void __init imx_smp_init_cpus(void) { + unsigned long arch_type; int i, ncores; - ncores = scu_get_core_count(scu_base); + asm volatile( + ".align 4\n" + "mrc p15, 0, %0, c0, c0, 0\n" + : "=r" (arch_type) + ); + /* MIDR[15:4] defines ARCH type */ + __mxc_arch_type = (arch_type >> 4) & 0xfff; + + if (arm_is_ca7()) { + unsigned long val; + + /* CA7 core number, [25:24] of CP15 L2CTLR */ + asm volatile("mrc p15, 1, %0, c9, c0, 2" : "=r" (val)); + ncores = ((val >> 24) & 0x3) + 1; + } else { + ncores = scu_get_core_count(scu_base); + } for (i = ncores; i < NR_CPUS; i++) set_cpu_possible(i, false); @@ -63,11 +87,15 @@ static void __init imx_smp_init_cpus(void) void imx_smp_prepare(void) { + if (arm_is_ca7()) + return; scu_enable(scu_base); } static void __init imx_smp_prepare_cpus(unsigned int max_cpus) { + if (arm_is_ca7()) + return; imx_smp_prepare(); /* diff --git a/arch/arm/mach-imx/pm-imx7.c b/arch/arm/mach-imx/pm-imx7.c index a1e3084798f5..e9f4d86f21aa 100644 --- a/arch/arm/mach-imx/pm-imx7.c +++ b/arch/arm/mach-imx/pm-imx7.c @@ -10,27 +10,373 @@ */ #include +#include #include #include +#include #include #include +#include +#include +#include +#include #include +#include +#include +#include +#include #include +#include #include #include +#include +#include #include +#include #include #include +#include #include +#include + #include "common.h" #include "hardware.h" +#include "cpuidle.h" +#define MX7_SUSPEND_OCRAM_SIZE 0x1000 +#define MX7_MAX_DDRC_NUM 32 +#define MX7_MAX_DDRC_PHY_NUM 16 + +#define MX7_SUSPEND_IRAM_ADDR_OFFSET 0 +#define READ_DATA_FROM_HARDWARE 0 + +#define UART_UCR1 0x80 +#define UART_UCR2 0x84 +#define UART_UCR3 0x88 +#define UART_UCR4 0x8c +#define UART_UFCR 0x90 +#define UART_UESC 0x9c +#define UART_UTIM 0xa0 +#define UART_UBIR 0xa4 +#define UART_UBMR 0xa8 +#define UART_UBRC 0xac +#define UART_UTS 0xb4 + +#define MAX_IOMUXC_GPR 23 +#define MAX_UART_IO 4 +#define MAX_CCM_LPCG 167 +#define MAX_GPT 3 +#define MAX_GPIO_ROW 7 +#define MAX_GPIO_COL 8 + +#define UART_RX_IO 0x128 +#define UART_RX_PAD 0x398 +#define UART_TX_IO 0x12c +#define UART_TX_PAD 0x39c + +#define GPT_CR 0x0 +#define GPT_PR 0x4 +#define GPT_IR 0xc + +#define CCM_LPCG_START 0x4040 +#define CCM_LPCG_STEP 0x10 +#define CCM_EIM_LPCG 0x4160 +#define CCM_PXP_LPCG 0x44c0 +#define CCM_PCIE_LPCG 0x4600 + +#define BM_CCM_ROOT_POST_PODF 0x3f +#define BM_CCM_ROOT_PRE_PODF 0x70000 +#define BM_CCM_ROOT_MUX 0x7000000 +#define BM_CCM_ROOT_ENABLE 0x10000000 + +#define BM_SYS_COUNTER_CNTCR_FCR1 0x200 +#define BM_SYS_COUNTER_CNTCR_FCR0 0x100 + +#define PFD_A_OFFSET 0xc0 +#define PFD_B_OFFSET 0xd0 + +#define PLL_ARM_OFFSET 0x60 +#define PLL_DDR_OFFSET 0x70 +#define PLL_DDR_SS_OFFSET 0x80 +#define PLL_DDR_NUM_OFFSET 0x90 +#define PLL_DDR_DENOM_OFFSET 0xa0 +#define PLL_480_OFFSET 0xb0 +#define PLL_ENET_OFFSET 0xe0 +#define PLL_AUDIO_OFFSET 0xf0 +#define PLL_AUDIO_SS_OFFSET 0x100 +#define PLL_AUDIO_NUM_OFFSET 0x110 +#define PLL_AUDIO_DENOM_OFFSET 0x120 +#define PLL_VIDEO_OFFSET 0x130 +#define PLL_VIDEO_SS_OFFSET 0x140 +#define PLL_VIDEO_NUM_OFFSET 0x150 +#define PLL_VIDEO_DENOM_OFFSET 0x160 + +#define REG_SET 0x4 +#define REG_CLR 0x8 + +#define GPIO_DR 0x0 +#define GPIO_GDIR 0x4 +#define GPIO_ICR1 0xc +#define GPIO_ICR2 0x10 +#define GPIO_IMR 0x14 +#define GPIO_EDGE 0x1c + +#define M4RCR 0x0C +#define M4_SP_OFF 0x00 +#define M4_PC_OFF 0x04 +#define M4_RCR_HALT 0xAB +#define M4_RCR_GO 0xAA #define M4_OCRAMS_RESERVED_SIZE 0xc extern unsigned long iram_tlb_base_addr; extern unsigned long iram_tlb_phys_addr; +static 
unsigned int *ocram_saved_in_ddr; +static void __iomem *ocram_base; +static unsigned int ocram_size; +static unsigned int *lpm_ocram_saved_in_ddr; +static void __iomem *lpm_ocram_base; + +static unsigned int *lpm_m4tcm_saved_in_ddr; +static void __iomem *lpm_m4tcm_base; +static void __iomem *m4_bootrom_base; + +static unsigned int lpm_ocram_size; +static void __iomem *ccm_base; +static void __iomem *lpsr_base; +static void __iomem *console_base; +static void __iomem *suspend_ocram_base; +static void __iomem *iomuxc_base; +static void __iomem *gpt1_base; +static void __iomem *system_counter_ctrl_base; +static void __iomem *system_counter_cmp_base; +static void __iomem *gpio1_base; +static void (*imx7_suspend_in_ocram_fn)(void __iomem *ocram_vbase); +struct imx7_cpu_pm_info *pm_info; +static bool lpsr_enabled; +static u32 iomuxc_gpr[MAX_IOMUXC_GPR]; +static u32 uart1_io[MAX_UART_IO]; +static u32 ccm_lpcg[MAX_CCM_LPCG]; +static u32 ccm_root[][2] = { + {0x8000, 0}, {0x8080, 0}, {0x8100, 0}, {0x8800, 0}, + {0x8880, 0}, {0x8900, 0}, {0x8980, 0}, {0x9000, 0}, + {0x9800, 0}, {0x9880, 0}, {0xa000, 0}, {0xa080, 0}, + {0xa100, 0}, {0xa180, 0}, {0xa200, 0}, {0xa280, 0}, + {0xa300, 0}, {0xa380, 0}, {0xa400, 0}, {0xa480, 0}, + {0xa500, 0}, {0xa580, 0}, {0xa600, 0}, {0xa680, 0}, + {0xa700, 0}, {0xa780, 0}, {0xa800, 0}, {0xa880, 0}, + {0xa900, 0}, {0xa980, 0}, {0xaa00, 0}, {0xaa80, 0}, + {0xab00, 0}, {0xab80, 0}, {0xac00, 0}, {0xac80, 0}, + {0xad00, 0}, {0xad80, 0}, {0xae00, 0}, {0xae80, 0}, + {0xaf00, 0}, {0xaf80, 0}, {0xb000, 0}, {0xb080, 0}, + {0xb100, 0}, {0xb180, 0}, {0xb200, 0}, {0xb280, 0}, + {0xb300, 0}, {0xb380, 0}, {0xb400, 0}, {0xb480, 0}, + {0xb500, 0}, {0xb580, 0}, {0xb600, 0}, {0xb680, 0}, + {0xb700, 0}, {0xb780, 0}, {0xb800, 0}, {0xb880, 0}, + {0xb900, 0}, {0xb980, 0}, {0xba00, 0}, {0xba80, 0}, + {0xbb00, 0}, {0xbb80, 0}, {0xbc00, 0}, {0xbc80, 0}, + {0xbd00, 0}, {0xbd80, 0}, {0xbe00, 0}, +}; +static u32 pfd_a, pfd_b; +static u32 pll[15]; +static u32 gpt1_regs[MAX_GPT]; +static u32 sys_ctrl_reg, sys_cmp_reg; +static u32 gpio_reg[MAX_GPIO_ROW][MAX_GPIO_COL]; +/* + * suspend ocram space layout: + * ======================== high address ====================== + * . + * . + * . 
+ * ^ + * ^ + * ^ + * imx7_suspend code + * PM_INFO structure(imx7_cpu_pm_info) + * ======================== low address ======================= + */ + +struct imx7_pm_base { + phys_addr_t pbase; + void __iomem *vbase; +}; + +struct imx7_pm_socdata { + u32 ddr_type; + const char *ddrc_compat; + const char *src_compat; + const char *iomuxc_compat; + const char *gpc_compat; + const u32 ddrc_num; + const u32 (*ddrc_offset)[2]; + const u32 ddrc_phy_num; + const u32 (*ddrc_phy_offset)[2]; +}; + +static const u32 imx7d_ddrc_lpddr3_setting[][2] __initconst = { + { 0x0, READ_DATA_FROM_HARDWARE }, + { 0x1a0, READ_DATA_FROM_HARDWARE }, + { 0x1a4, READ_DATA_FROM_HARDWARE }, + { 0x1a8, READ_DATA_FROM_HARDWARE }, + { 0x64, READ_DATA_FROM_HARDWARE }, + { 0xd0, READ_DATA_FROM_HARDWARE }, + { 0xdc, READ_DATA_FROM_HARDWARE }, + { 0xe0, READ_DATA_FROM_HARDWARE }, + { 0xe4, READ_DATA_FROM_HARDWARE }, + { 0xf4, READ_DATA_FROM_HARDWARE }, + { 0x100, READ_DATA_FROM_HARDWARE }, + { 0x104, READ_DATA_FROM_HARDWARE }, + { 0x108, READ_DATA_FROM_HARDWARE }, + { 0x10c, READ_DATA_FROM_HARDWARE }, + { 0x110, READ_DATA_FROM_HARDWARE }, + { 0x114, READ_DATA_FROM_HARDWARE }, + { 0x118, READ_DATA_FROM_HARDWARE }, + { 0x120, READ_DATA_FROM_HARDWARE }, + { 0x11c, READ_DATA_FROM_HARDWARE }, + { 0x180, READ_DATA_FROM_HARDWARE }, + { 0x184, READ_DATA_FROM_HARDWARE }, + { 0x190, READ_DATA_FROM_HARDWARE }, + { 0x194, READ_DATA_FROM_HARDWARE }, + { 0x200, READ_DATA_FROM_HARDWARE }, + { 0x204, READ_DATA_FROM_HARDWARE }, + { 0x210, READ_DATA_FROM_HARDWARE }, + { 0x214, READ_DATA_FROM_HARDWARE }, + { 0x218, READ_DATA_FROM_HARDWARE }, + { 0x240, READ_DATA_FROM_HARDWARE }, + { 0x244, READ_DATA_FROM_HARDWARE }, +}; + +static const u32 imx7d_ddrc_phy_lpddr3_setting[][2] __initconst = { + { 0x0, READ_DATA_FROM_HARDWARE }, + { 0x4, READ_DATA_FROM_HARDWARE }, + { 0x8, READ_DATA_FROM_HARDWARE }, + { 0x10, READ_DATA_FROM_HARDWARE }, + { 0xb0, READ_DATA_FROM_HARDWARE }, + { 0x1c, READ_DATA_FROM_HARDWARE }, + { 0x9c, READ_DATA_FROM_HARDWARE }, + { 0x7c, READ_DATA_FROM_HARDWARE }, + { 0x80, READ_DATA_FROM_HARDWARE }, + { 0x84, READ_DATA_FROM_HARDWARE }, + { 0x88, READ_DATA_FROM_HARDWARE }, + { 0x6c, READ_DATA_FROM_HARDWARE }, + { 0x20, READ_DATA_FROM_HARDWARE }, + { 0x30, READ_DATA_FROM_HARDWARE }, + { 0x50, 0x01000008 }, + { 0x50, 0x00000008 }, + { 0xc0, 0x0e487304 }, + { 0xc0, 0x0e4c7304 }, + { 0xc0, 0x0e4c7306 }, + { 0xc0, 0x0e487304 }, +}; + +static const u32 imx7d_ddrc_ddr3_setting[][2] __initconst = { + { 0x0, READ_DATA_FROM_HARDWARE }, + { 0x1a0, READ_DATA_FROM_HARDWARE }, + { 0x1a4, READ_DATA_FROM_HARDWARE }, + { 0x1a8, READ_DATA_FROM_HARDWARE }, + { 0x64, READ_DATA_FROM_HARDWARE }, + { 0x490, READ_DATA_FROM_HARDWARE }, + { 0xd0, READ_DATA_FROM_HARDWARE }, + { 0xd4, READ_DATA_FROM_HARDWARE }, + { 0xdc, READ_DATA_FROM_HARDWARE }, + { 0xe0, READ_DATA_FROM_HARDWARE }, + { 0xe4, READ_DATA_FROM_HARDWARE }, + { 0xf4, READ_DATA_FROM_HARDWARE }, + { 0x100, READ_DATA_FROM_HARDWARE }, + { 0x104, READ_DATA_FROM_HARDWARE }, + { 0x108, READ_DATA_FROM_HARDWARE }, + { 0x10c, READ_DATA_FROM_HARDWARE }, + { 0x110, READ_DATA_FROM_HARDWARE }, + { 0x114, READ_DATA_FROM_HARDWARE }, + { 0x120, READ_DATA_FROM_HARDWARE }, + { 0x180, READ_DATA_FROM_HARDWARE }, + { 0x190, READ_DATA_FROM_HARDWARE }, + { 0x194, READ_DATA_FROM_HARDWARE }, + { 0x200, READ_DATA_FROM_HARDWARE }, + { 0x204, READ_DATA_FROM_HARDWARE }, + { 0x210, READ_DATA_FROM_HARDWARE }, + { 0x214, READ_DATA_FROM_HARDWARE }, + { 0x218, READ_DATA_FROM_HARDWARE }, + { 0x240, READ_DATA_FROM_HARDWARE }, + { 
0x244, READ_DATA_FROM_HARDWARE }, +}; + +static const u32 imx7d_ddrc_phy_ddr3_setting[][2] __initconst = { + { 0x0, READ_DATA_FROM_HARDWARE }, + { 0x4, READ_DATA_FROM_HARDWARE }, + { 0x10, READ_DATA_FROM_HARDWARE }, + { 0xb0, READ_DATA_FROM_HARDWARE }, + { 0x9c, READ_DATA_FROM_HARDWARE }, + { 0x7c, READ_DATA_FROM_HARDWARE }, + { 0x80, READ_DATA_FROM_HARDWARE }, + { 0x84, READ_DATA_FROM_HARDWARE }, + { 0x88, READ_DATA_FROM_HARDWARE }, + { 0x6c, READ_DATA_FROM_HARDWARE }, + { 0x20, READ_DATA_FROM_HARDWARE }, + { 0x30, READ_DATA_FROM_HARDWARE }, + { 0x50, 0x01000010 }, + { 0x50, 0x00000010 }, + { 0xc0, 0x0e407304 }, + { 0xc0, 0x0e447304 }, + { 0xc0, 0x0e447306 }, + { 0xc0, 0x0e407304 }, +}; + +static const struct imx7_pm_socdata imx7d_pm_data_lpddr3 __initconst = { + .ddrc_compat = "fsl,imx7d-ddrc", + .src_compat = "fsl,imx7d-src", + .iomuxc_compat = "fsl,imx7d-iomuxc", + .gpc_compat = "fsl,imx7d-gpc", + .ddrc_num = ARRAY_SIZE(imx7d_ddrc_lpddr3_setting), + .ddrc_offset = imx7d_ddrc_lpddr3_setting, + .ddrc_phy_num = ARRAY_SIZE(imx7d_ddrc_phy_lpddr3_setting), + .ddrc_phy_offset = imx7d_ddrc_phy_lpddr3_setting, +}; + +static const struct imx7_pm_socdata imx7d_pm_data_ddr3 __initconst = { + .ddrc_compat = "fsl,imx7d-ddrc", + .src_compat = "fsl,imx7d-src", + .iomuxc_compat = "fsl,imx7d-iomuxc", + .gpc_compat = "fsl,imx7d-gpc", + .ddrc_num = ARRAY_SIZE(imx7d_ddrc_ddr3_setting), + .ddrc_offset = imx7d_ddrc_ddr3_setting, + .ddrc_phy_num = ARRAY_SIZE(imx7d_ddrc_phy_ddr3_setting), + .ddrc_phy_offset = imx7d_ddrc_phy_ddr3_setting, +}; + +/* + * This structure is for passing necessary data for low level ocram + * suspend code(arch/arm/mach-imx/suspend-imx7.S), if this struct + * definition is changed, the offset definition in + * arch/arm/mach-imx/suspend-imx7.S must be also changed accordingly, + * otherwise, the suspend to ocram function will be broken! + */ +struct imx7_cpu_pm_info { + u32 m4_reserve0; + u32 m4_reserve1; + u32 m4_reserve2; + phys_addr_t pbase; /* The physical address of pm_info. */ + phys_addr_t resume_addr; /* The physical resume address for asm code */ + u32 ddr_type; + u32 pm_info_size; /* Size of pm_info. */ + struct imx7_pm_base ddrc_base; + struct imx7_pm_base ddrc_phy_base; + struct imx7_pm_base src_base; + struct imx7_pm_base iomuxc_gpr_base; + struct imx7_pm_base ccm_base; + struct imx7_pm_base gpc_base; + struct imx7_pm_base snvs_base; + struct imx7_pm_base anatop_base; + struct imx7_pm_base lpsr_base; + struct imx7_pm_base gic_base; + u32 ttbr1; /* Store TTBR1 */ + u32 ddrc_num; /* Number of DDRC which need saved/restored. */ + u32 ddrc_val[MX7_MAX_DDRC_NUM][2]; /* To save offset and value */ + u32 ddrc_phy_num; /* Number of DDRC which need saved/restored. 
*/ + u32 ddrc_phy_val[MX7_MAX_DDRC_NUM][2]; /* To save offset and value */ +} __aligned(8); + static struct map_desc imx7_pm_io_desc[] __initdata = { imx_map_entry(MX7D, AIPS1, MT_DEVICE), imx_map_entry(MX7D, AIPS2, MT_DEVICE), @@ -42,6 +388,485 @@ static const char * const low_power_ocram_match[] __initconst = { NULL }; +static void imx7_gpio_save(void) +{ + u32 i; + + for (i = 0; i < 7; i++) { + gpio_reg[i][0] = readl_relaxed(gpio1_base + + (i << 16) + GPIO_DR); + gpio_reg[i][1] = readl_relaxed(gpio1_base + + (i << 16) + GPIO_GDIR); + gpio_reg[i][3] = readl_relaxed(gpio1_base + + (i << 16) + GPIO_ICR1); + gpio_reg[i][4] = readl_relaxed(gpio1_base + + (i << 16) + GPIO_ICR2); + gpio_reg[i][5] = readl_relaxed(gpio1_base + + (i << 16) + GPIO_IMR); + gpio_reg[i][7] = readl_relaxed(gpio1_base + + (i << 16) + GPIO_EDGE); + } +} + +static void imx7_gpio_restore(void) +{ + u32 i, val; + + for (i = 0; i < 7; i++) { + writel_relaxed(gpio_reg[i][1], gpio1_base + + (i << 16) + GPIO_GDIR); + writel_relaxed(gpio_reg[i][3], gpio1_base + + (i << 16) + GPIO_ICR1); + writel_relaxed(gpio_reg[i][4], gpio1_base + + (i << 16) + GPIO_ICR2); + writel_relaxed(gpio_reg[i][5], gpio1_base + + (i << 16) + GPIO_IMR); + writel_relaxed(gpio_reg[i][7], gpio1_base + + (i << 16) + GPIO_EDGE); + /* only restore output gpio value */ + val = readl_relaxed(gpio1_base + (i << 16) + GPIO_DR) | + (gpio_reg[i][0] & gpio_reg[i][1]); + writel_relaxed(val, gpio1_base + (i << 16) + GPIO_DR); + } +} + +static void imx7_ccm_save(void) +{ + u32 i; + + for (i = 0; i < MAX_CCM_LPCG; i++) + ccm_lpcg[i] = readl_relaxed(pm_info->ccm_base.vbase + + i * CCM_LPCG_STEP + CCM_LPCG_START); + pfd_a = readl_relaxed(pm_info->anatop_base.vbase + PFD_A_OFFSET); + pfd_b = readl_relaxed(pm_info->anatop_base.vbase + PFD_B_OFFSET); + + pll[0] = readl_relaxed(pm_info->anatop_base.vbase + + PLL_ARM_OFFSET); + pll[1] = readl_relaxed(pm_info->anatop_base.vbase + + PLL_DDR_OFFSET); + pll[2] = readl_relaxed(pm_info->anatop_base.vbase + + PLL_DDR_SS_OFFSET); + pll[3] = readl_relaxed(pm_info->anatop_base.vbase + + PLL_DDR_NUM_OFFSET); + pll[4] = readl_relaxed(pm_info->anatop_base.vbase + + PLL_DDR_DENOM_OFFSET); + pll[5] = readl_relaxed(pm_info->anatop_base.vbase + + PLL_480_OFFSET); + pll[6] = readl_relaxed(pm_info->anatop_base.vbase + + PLL_ENET_OFFSET); + pll[7] = readl_relaxed(pm_info->anatop_base.vbase + + PLL_AUDIO_OFFSET); + pll[8] = readl_relaxed(pm_info->anatop_base.vbase + + PLL_AUDIO_SS_OFFSET); + pll[9] = readl_relaxed(pm_info->anatop_base.vbase + + PLL_AUDIO_NUM_OFFSET); + pll[10] = readl_relaxed(pm_info->anatop_base.vbase + + PLL_AUDIO_DENOM_OFFSET); + pll[11] = readl_relaxed(pm_info->anatop_base.vbase + + PLL_VIDEO_OFFSET); + pll[12] = readl_relaxed(pm_info->anatop_base.vbase + + PLL_VIDEO_SS_OFFSET); + pll[13] = readl_relaxed(pm_info->anatop_base.vbase + + PLL_VIDEO_NUM_OFFSET); + pll[14] = readl_relaxed(pm_info->anatop_base.vbase + + PLL_VIDEO_DENOM_OFFSET); + + /* enable all PLLs/PFDs for saving CCM root */ + writel_relaxed(0x1c000070, pm_info->anatop_base.vbase + + PLL_480_OFFSET + 0x8); + writel_relaxed(0x80808080, pm_info->anatop_base.vbase + + PFD_A_OFFSET + 0x8); + writel_relaxed(0x80808080, pm_info->anatop_base.vbase + + PFD_B_OFFSET + 0x8); + writel_relaxed(0x1fc0, pm_info->anatop_base.vbase + + PLL_ENET_OFFSET + 0x4); + writel_relaxed(0x12000, pm_info->anatop_base.vbase + + PLL_AUDIO_OFFSET); + writel_relaxed(0x12000, pm_info->anatop_base.vbase + + PLL_VIDEO_OFFSET); + + for (i = 0; i < sizeof(ccm_root) / 8; i++) + ccm_root[i][1] = 
readl_relaxed( + pm_info->ccm_base.vbase + ccm_root[i][0]); +} + +static void imx7_ccm_restore(void) +{ + u32 i, val; + + /* enable all PLLs/PFDs for restoring CCM root */ + writel_relaxed(0x1c000070, pm_info->anatop_base.vbase + + PLL_480_OFFSET + REG_CLR); + writel_relaxed(0x80808080, pm_info->anatop_base.vbase + + PFD_A_OFFSET + REG_CLR); + writel_relaxed(0x80808080, pm_info->anatop_base.vbase + + PFD_B_OFFSET + REG_CLR); + writel_relaxed(0x1fc0, pm_info->anatop_base.vbase + + PLL_ENET_OFFSET + REG_SET); + writel_relaxed(0x12000, pm_info->anatop_base.vbase + + PLL_AUDIO_OFFSET); + writel_relaxed(0x12000, pm_info->anatop_base.vbase + + PLL_VIDEO_OFFSET); + + for (i = 0; i < sizeof(ccm_root) / 8; i++) { + val = readl_relaxed(pm_info->ccm_base.vbase + ccm_root[i][0]); + /* restore post podf */ + val &= ~BM_CCM_ROOT_POST_PODF; + val |= ccm_root[i][1] & BM_CCM_ROOT_POST_PODF; + writel_relaxed(val, pm_info->ccm_base.vbase + ccm_root[i][0]); + /* resotre pre podf */ + val &= ~BM_CCM_ROOT_PRE_PODF; + val |= ccm_root[i][1] & BM_CCM_ROOT_PRE_PODF; + writel_relaxed(val, pm_info->ccm_base.vbase + ccm_root[i][0]); + /* restore mux */ + val &= ~BM_CCM_ROOT_MUX; + val |= ccm_root[i][1] & BM_CCM_ROOT_MUX; + writel_relaxed(val, pm_info->ccm_base.vbase + ccm_root[i][0]); + /* restore enable */ + val &= ~BM_CCM_ROOT_ENABLE; + val |= ccm_root[i][1] & BM_CCM_ROOT_ENABLE; + writel_relaxed(val, pm_info->ccm_base.vbase + ccm_root[i][0]); + } + + /* restore PLLs */ + writel_relaxed(pll[0], pm_info->anatop_base.vbase + + PLL_ARM_OFFSET); + writel_relaxed(pll[1], pm_info->anatop_base.vbase + + PLL_DDR_OFFSET); + writel_relaxed(pll[2], pm_info->anatop_base.vbase + + PLL_DDR_SS_OFFSET); + writel_relaxed(pll[3], pm_info->anatop_base.vbase + + PLL_DDR_NUM_OFFSET); + writel_relaxed(pll[4], pm_info->anatop_base.vbase + + PLL_DDR_DENOM_OFFSET); + writel_relaxed(pll[5], pm_info->anatop_base.vbase + + PLL_480_OFFSET); + writel_relaxed(pll[6], pm_info->anatop_base.vbase + + PLL_ENET_OFFSET); + writel_relaxed(pll[7], pm_info->anatop_base.vbase + + PLL_AUDIO_OFFSET); + writel_relaxed(pll[8], pm_info->anatop_base.vbase + + PLL_AUDIO_SS_OFFSET); + writel_relaxed(pll[9], pm_info->anatop_base.vbase + + PLL_AUDIO_NUM_OFFSET); + writel_relaxed(pll[10], pm_info->anatop_base.vbase + + PLL_AUDIO_DENOM_OFFSET); + writel_relaxed(pll[11], pm_info->anatop_base.vbase + + PLL_VIDEO_OFFSET); + writel_relaxed(pll[12], pm_info->anatop_base.vbase + + PLL_VIDEO_SS_OFFSET); + writel_relaxed(pll[13], pm_info->anatop_base.vbase + + PLL_VIDEO_NUM_OFFSET); + writel_relaxed(pll[14], pm_info->anatop_base.vbase + + PLL_VIDEO_DENOM_OFFSET); + + for (i = 0; i < MAX_CCM_LPCG; i++) + writel_relaxed(ccm_lpcg[i], pm_info->ccm_base.vbase + + i * CCM_LPCG_STEP + CCM_LPCG_START); + /* restore PFDs */ + writel_relaxed(pfd_a & 0x80808080, + pm_info->anatop_base.vbase + PFD_A_OFFSET + REG_SET); + writel_relaxed(pfd_a, pm_info->anatop_base.vbase + PFD_A_OFFSET); + + writel_relaxed(pfd_b & 0x80808080, + pm_info->anatop_base.vbase + PFD_B_OFFSET + REG_SET); + writel_relaxed(pfd_b, pm_info->anatop_base.vbase + PFD_B_OFFSET); +} + +static void imx7_sys_counter_save(void) +{ + sys_ctrl_reg = readl_relaxed(system_counter_ctrl_base); + sys_cmp_reg = readl_relaxed(system_counter_cmp_base); +} + +static void imx7_sys_counter_restore(void) +{ + writel_relaxed(sys_ctrl_reg, system_counter_ctrl_base); + writel_relaxed(sys_cmp_reg, system_counter_cmp_base); +} + +static void imx7_gpt_save(void) +{ + gpt1_regs[0] = readl_relaxed(gpt1_base + GPT_CR); + gpt1_regs[1] = 
readl_relaxed(gpt1_base + GPT_PR); + gpt1_regs[2] = readl_relaxed(gpt1_base + GPT_IR); +} + +static void imx7_gpt_restore(void) +{ + writel_relaxed(gpt1_regs[0], gpt1_base + GPT_CR); + writel_relaxed(gpt1_regs[1], gpt1_base + GPT_PR); + writel_relaxed(gpt1_regs[2], gpt1_base + GPT_IR); +} + +static void imx7_iomuxc_gpr_save(void) +{ + u32 i; + + for (i = 0; i < MAX_IOMUXC_GPR; i++) + iomuxc_gpr[i] = readl_relaxed( + pm_info->iomuxc_gpr_base.vbase + i * 4); +} + +static void imx7_iomuxc_gpr_restore(void) +{ + u32 i; + + for (i = 0; i < MAX_IOMUXC_GPR; i++) + writel_relaxed(iomuxc_gpr[i], + pm_info->iomuxc_gpr_base.vbase + i * 4); +} + +static void imx7_console_save(unsigned int *regs) +{ + if (!console_base) + return; + + regs[0] = readl_relaxed(console_base + UART_UCR1); + regs[1] = readl_relaxed(console_base + UART_UCR2); + regs[2] = readl_relaxed(console_base + UART_UCR3); + regs[3] = readl_relaxed(console_base + UART_UCR4); + regs[4] = readl_relaxed(console_base + UART_UFCR); + regs[5] = readl_relaxed(console_base + UART_UESC); + regs[6] = readl_relaxed(console_base + UART_UTIM); + regs[7] = readl_relaxed(console_base + UART_UBIR); + regs[8] = readl_relaxed(console_base + UART_UBMR); + regs[9] = readl_relaxed(console_base + UART_UTS); +} + +static void imx7_console_io_save(void) +{ + /* save uart1 io, driver resume is too late */ + uart1_io[0] = readl_relaxed(iomuxc_base + UART_RX_IO); + uart1_io[1] = readl_relaxed(iomuxc_base + UART_RX_PAD); + uart1_io[2] = readl_relaxed(iomuxc_base + UART_TX_IO); + uart1_io[3] = readl_relaxed(iomuxc_base + UART_TX_PAD); +} + +static void imx7_console_restore(unsigned int *regs) +{ + if (!console_base) + return; + + writel_relaxed(regs[4], console_base + UART_UFCR); + writel_relaxed(regs[5], console_base + UART_UESC); + writel_relaxed(regs[6], console_base + UART_UTIM); + writel_relaxed(regs[7], console_base + UART_UBIR); + writel_relaxed(regs[8], console_base + UART_UBMR); + writel_relaxed(regs[9], console_base + UART_UTS); + writel_relaxed(regs[0], console_base + UART_UCR1); + writel_relaxed(regs[1] | 0x1, console_base + UART_UCR2); + writel_relaxed(regs[2], console_base + UART_UCR3); + writel_relaxed(regs[3], console_base + UART_UCR4); +} + +static void imx7_console_io_restore(void) +{ + /* restore uart1 io */ + writel_relaxed(uart1_io[0], iomuxc_base + UART_RX_IO); + writel_relaxed(uart1_io[1], iomuxc_base + UART_RX_PAD); + writel_relaxed(uart1_io[2], iomuxc_base + UART_TX_IO); + writel_relaxed(uart1_io[3], iomuxc_base + UART_TX_PAD); +} + +#define MX7D_SUSPEND_POWERDWN_PARAM \ + ((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \ + (1 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \ + (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT)) + +#define MX7D_SUSPEND_STANDBY_PARAM \ + ((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \ + (1 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \ + (PSCI_POWER_STATE_TYPE_STANDBY << PSCI_0_2_POWER_STATE_TYPE_SHIFT)) + +static int imx7_suspend_finish(unsigned long val) +{ + u32 state; + + if (val == 0) + state = MX7D_SUSPEND_POWERDWN_PARAM; + else + state = MX7D_SUSPEND_STANDBY_PARAM; + + if (psci_ops.cpu_suspend) { + return psci_ops.cpu_suspend(state, __pa(cpu_resume)); + } + + if (!imx7_suspend_in_ocram_fn) { + cpu_do_idle(); + } else { + /* + * call low level suspend function in ocram, + * as we need to float DDR IO. 
+ */ + local_flush_tlb_all(); + imx7_suspend_in_ocram_fn(suspend_ocram_base); + } + + return 0; +} + +static void imx7_pm_set_lpsr_resume_addr(unsigned long addr) +{ + writel_relaxed(addr, pm_info->lpsr_base.vbase); +} + +static int imx7_pm_is_resume_from_lpsr(void) +{ + return readl_relaxed(lpsr_base); +} + +static int imx7_pm_enter(suspend_state_t state) +{ + unsigned int console_saved_reg[10] = {0}; + u32 val; + + if (!iram_tlb_base_addr) { + pr_warn("No IRAM/OCRAM memory allocated for suspend/resume \ + code. Please ensure device tree has an entry for \ + fsl,lpm-sram.\n"); + return -EINVAL; + } + + /* + * arm_arch_timer driver requires system counter to be + * a clock source with CLOCK_SOURCE_SUSPEND_NONSTOP flag + * set, which means hardware system counter needs to keep + * running during suspend, as the base clock for system + * counter is 24MHz which will be disabled in STOP mode, + * so we need to switch system counter's clock to alternate + * (lower) clock, it is based on 32K, from block guide, there + * is no special flow needs to be followed, system counter + * hardware will handle the clock transition. + */ + val = readl_relaxed(system_counter_ctrl_base); + val &= ~BM_SYS_COUNTER_CNTCR_FCR0; + val |= BM_SYS_COUNTER_CNTCR_FCR1; + writel_relaxed(val, system_counter_ctrl_base); + + switch (state) { + case PM_SUSPEND_STANDBY: + imx_anatop_pre_suspend(); + imx_gpcv2_pre_suspend(false); + + /* Zzz ... */ + if (psci_ops.cpu_suspend) + cpu_suspend(1, imx7_suspend_finish); + else + imx7_suspend_in_ocram_fn(suspend_ocram_base); + + imx_anatop_post_resume(); + imx_gpcv2_post_resume(); + break; + case PM_SUSPEND_MEM: + imx_anatop_pre_suspend(); + imx_gpcv2_pre_suspend(true); + if (imx_gpcv2_is_mf_mix_off()) { + /* + * per design requirement, EXSC for PCIe/EIM/PXP + * will need clock to recover RDC setting on + * resume, so enable PCIe/EIM LPCG for RDC + * recovery when M/F mix off + */ + writel_relaxed(0x3, pm_info->ccm_base.vbase + + CCM_EIM_LPCG); + writel_relaxed(0x3, pm_info->ccm_base.vbase + + CCM_PXP_LPCG); + writel_relaxed(0x3, pm_info->ccm_base.vbase + + CCM_PCIE_LPCG); + /* stop m4 if mix will also be shutdown */ + if (imx_src_is_m4_enabled() && imx_mu_is_m4_in_stop()) { + writel(M4_RCR_HALT, + pm_info->src_base.vbase + M4RCR); + imx_gpcv2_enable_wakeup_for_m4(); + } + imx7_console_save(console_saved_reg); + memcpy(ocram_saved_in_ddr, ocram_base, ocram_size); + if (lpsr_enabled) { + imx7_pm_set_lpsr_resume_addr(pm_info->resume_addr); + imx7_console_io_save(); + memcpy(lpm_ocram_saved_in_ddr, lpm_ocram_base, + lpm_ocram_size); + imx7_iomuxc_gpr_save(); + imx7_ccm_save(); + imx7_gpt_save(); + imx7_sys_counter_save(); + imx7_gpio_save(); + } + } + + /* Zzz ... 
*/ + cpu_suspend(0, imx7_suspend_finish); + + if (imx7_pm_is_resume_from_lpsr()) { + imx7_console_io_restore(); + memcpy(lpm_ocram_base, lpm_ocram_saved_in_ddr, + lpm_ocram_size); + imx7_iomuxc_gpr_restore(); + imx7_ccm_restore(); + imx7_gpt_restore(); + imx7_sys_counter_restore(); + imx7_gpio_restore(); + imx7d_enable_rcosc(); + } + if (imx_gpcv2_is_mf_mix_off() || + imx7_pm_is_resume_from_lpsr()) { + writel_relaxed(0x0, pm_info->ccm_base.vbase + + CCM_EIM_LPCG); + writel_relaxed(0x0, pm_info->ccm_base.vbase + + CCM_PXP_LPCG); + writel_relaxed(0x0, pm_info->ccm_base.vbase + + CCM_PCIE_LPCG); + memcpy(ocram_base, ocram_saved_in_ddr, ocram_size); + imx7_console_restore(console_saved_reg); + if (imx_src_is_m4_enabled() && imx_mu_is_m4_in_stop()) { + imx_gpcv2_disable_wakeup_for_m4(); + /* restore M4 image */ + memcpy(lpm_m4tcm_base, + lpm_m4tcm_saved_in_ddr, SZ_32K); + /* kick m4 to enable */ + writel(M4_RCR_GO, + pm_info->src_base.vbase + M4RCR); + /* offset high bus count for m4 image */ + request_bus_freq(BUS_FREQ_HIGH); + /* restore M4 to run mode */ + imx_mu_set_m4_run_mode(); + /* gpc wakeup */ + } + } + /* clear LPSR resume address */ + imx7_pm_set_lpsr_resume_addr(0); + imx_anatop_post_resume(); + imx_gpcv2_post_resume(); + break; + default: + return -EINVAL; + } + + /* restore system counter's clock to base clock */ + val = readl_relaxed(system_counter_ctrl_base); + val &= ~BM_SYS_COUNTER_CNTCR_FCR1; + val |= BM_SYS_COUNTER_CNTCR_FCR0; + writel_relaxed(val, system_counter_ctrl_base); + + return 0; +} + +static int imx7_pm_valid(suspend_state_t state) +{ + return state == PM_SUSPEND_STANDBY || state == PM_SUSPEND_MEM; +} + +static const struct platform_suspend_ops imx7_pm_ops = { + .enter = imx7_pm_enter, + .valid = imx7_pm_valid, +}; + +void __init imx7_pm_set_ccm_base(void __iomem *base) +{ + ccm_base = base; +} + static struct map_desc iram_tlb_io_desc __initdata = { /* .virtual and .pfn are run-time assigned */ .length = SZ_1M, @@ -146,3 +971,257 @@ void __init imx7_pm_map_io(void) *((unsigned long *)iram_tlb_base_addr + j) = (MX7D_GIC_BASE_ADDR & 0xFFF00000) | TT_ATTRIB_NON_CACHEABLE_1M; } + +static int __init imx7_suspend_init(const struct imx7_pm_socdata *socdata) +{ + struct device_node *node; + int i, ret = 0; + const u32 (*ddrc_offset_array)[2]; + const u32 (*ddrc_phy_offset_array)[2]; + unsigned long iram_paddr; + + suspend_set_ops(&imx7_pm_ops); + + if (!socdata) { + pr_warn("%s: invalid argument!\n", __func__); + return -EINVAL; + } + + /* + * 16KB is allocated for IRAM TLB, but only up 8k is for kernel TLB, + * The lower 8K is not used, so use the lower 8K for IRAM code and + * pm_info. + * + */ + iram_paddr = iram_tlb_phys_addr + MX7_SUSPEND_IRAM_ADDR_OFFSET; + + /* Make sure iram_paddr is 8 byte aligned. */ + if ((uintptr_t)(iram_paddr) & (FNCPY_ALIGN - 1)) + iram_paddr += FNCPY_ALIGN - iram_paddr % (FNCPY_ALIGN); + + /* Get the virtual address of the suspend code. */ + suspend_ocram_base = (void *)IMX_IO_P2V(iram_paddr); + + if (psci_ops.cpu_suspend) { + pm_info = kmalloc(sizeof(*pm_info), GFP_KERNEL); + if (!pm_info) + return -ENOMEM; + } else { + pm_info = suspend_ocram_base; + } + /* pbase points to iram_paddr. */ + pm_info->pbase = iram_paddr; + pm_info->resume_addr = virt_to_phys(ca7_cpu_resume); + pm_info->pm_info_size = sizeof(*pm_info); + + /* + * ccm physical address is not used by asm code currently, + * so get ccm virtual address directly, as we already have + * it from ccm driver. 
+ */ + pm_info->ccm_base.pbase = MX7D_CCM_BASE_ADDR; + pm_info->ccm_base.vbase = (void __iomem *) + IMX_IO_P2V(MX7D_CCM_BASE_ADDR); + + pm_info->ddrc_base.pbase = MX7D_DDRC_BASE_ADDR; + pm_info->ddrc_base.vbase = (void __iomem *) + IMX_IO_P2V(MX7D_DDRC_BASE_ADDR); + + pm_info->ddrc_phy_base.pbase = MX7D_DDRC_PHY_BASE_ADDR; + pm_info->ddrc_phy_base.vbase = (void __iomem *) + IMX_IO_P2V(MX7D_DDRC_PHY_BASE_ADDR); + + pm_info->src_base.pbase = MX7D_SRC_BASE_ADDR; + pm_info->src_base.vbase = (void __iomem *) + IMX_IO_P2V(MX7D_SRC_BASE_ADDR); + + pm_info->iomuxc_gpr_base.pbase = MX7D_IOMUXC_GPR_BASE_ADDR; + pm_info->iomuxc_gpr_base.vbase = (void __iomem *) + IMX_IO_P2V(MX7D_IOMUXC_GPR_BASE_ADDR); + + pm_info->gpc_base.pbase = MX7D_GPC_BASE_ADDR; + pm_info->gpc_base.vbase = (void __iomem *) + IMX_IO_P2V(MX7D_GPC_BASE_ADDR); + + pm_info->anatop_base.pbase = MX7D_ANATOP_BASE_ADDR; + pm_info->anatop_base.vbase = (void __iomem *) + IMX_IO_P2V(MX7D_ANATOP_BASE_ADDR); + + pm_info->snvs_base.pbase = MX7D_SNVS_BASE_ADDR; + pm_info->snvs_base.vbase = (void __iomem *) + IMX_IO_P2V(MX7D_SNVS_BASE_ADDR); + + pm_info->lpsr_base.pbase = MX7D_LPSR_BASE_ADDR; + lpsr_base = pm_info->lpsr_base.vbase = (void __iomem *) + IMX_IO_P2V(MX7D_LPSR_BASE_ADDR); + + pm_info->gic_base.pbase = MX7D_GIC_BASE_ADDR; + pm_info->gic_base.vbase = (void __iomem *) + IMX_IO_P2V(MX7D_GIC_BASE_ADDR); + + pm_info->ddrc_num = socdata->ddrc_num; + ddrc_offset_array = socdata->ddrc_offset; + pm_info->ddrc_phy_num = socdata->ddrc_phy_num; + ddrc_phy_offset_array = socdata->ddrc_phy_offset; + + /* initialize DDRC settings */ + for (i = 0; i < pm_info->ddrc_num; i++) { + pm_info->ddrc_val[i][0] = ddrc_offset_array[i][0]; + if (ddrc_offset_array[i][1] == READ_DATA_FROM_HARDWARE) + pm_info->ddrc_val[i][1] = + readl_relaxed(pm_info->ddrc_base.vbase + + ddrc_offset_array[i][0]); + else + pm_info->ddrc_val[i][1] = ddrc_offset_array[i][1]; + + if (pm_info->ddrc_val[i][0] == 0xd0) + pm_info->ddrc_val[i][1] |= 0xc0000000; + } + + /* initialize DDRC PHY settings */ + for (i = 0; i < pm_info->ddrc_phy_num; i++) { + pm_info->ddrc_phy_val[i][0] = + ddrc_phy_offset_array[i][0]; + if (ddrc_phy_offset_array[i][1] == READ_DATA_FROM_HARDWARE) + pm_info->ddrc_phy_val[i][1] = + readl_relaxed(pm_info->ddrc_phy_base.vbase + + ddrc_phy_offset_array[i][0]); + else + pm_info->ddrc_phy_val[i][1] = + ddrc_phy_offset_array[i][1]; + } + + if (psci_ops.cpu_suspend) + goto put_node; + + imx7_suspend_in_ocram_fn = fncpy( + suspend_ocram_base + sizeof(*pm_info), + &imx7_suspend, + MX7_SUSPEND_OCRAM_SIZE - sizeof(*pm_info)); + + goto put_node; + +put_node: + of_node_put(node); + + return ret; +} + +static void __init imx7_pm_common_init(const struct imx7_pm_socdata + *socdata) +{ + int ret; + struct regmap *gpr; + + if (IS_ENABLED(CONFIG_SUSPEND)) { + ret = imx7_suspend_init(socdata); + if (ret) + pr_warn("%s: No DDR LPM support with suspend %d!\n", + __func__, ret); + } + + /* + * Force IOMUXC irq pending, so that the interrupt to GPC can be + * used to deassert dsm_request signal when the signal gets + * asserted unexpectedly. 
+ */ + gpr = syscon_regmap_lookup_by_compatible("fsl,imx7d-iomuxc-gpr"); + if (!IS_ERR(gpr)) + regmap_update_bits(gpr, IOMUXC_GPR1, IMX7D_GPR1_IRQ_MASK, + IMX7D_GPR1_IRQ_MASK); +} + +void __init imx7d_pm_init(void) +{ + struct device_node *np; + struct resource res; + if (imx_src_is_m4_enabled()) { + /* map the 32K of M4 TCM */ + np = of_find_node_by_path( + "/tcml@007f8000"); + if (np) + lpm_m4tcm_base = of_iomap(np, 0); + WARN_ON(!lpm_m4tcm_base); + + /* map the m4 bootrom from dtb */ + np = of_find_node_by_path( + "/soc/sram@00180000"); + if (np) + m4_bootrom_base = of_iomap(np, 0); + WARN_ON(!m4_bootrom_base); + + lpm_m4tcm_saved_in_ddr = kzalloc(SZ_32K, GFP_KERNEL); + WARN_ON(!lpm_m4tcm_saved_in_ddr); + + /* save M4 Image to DDR */ + memcpy(lpm_m4tcm_saved_in_ddr, lpm_m4tcm_base, SZ_32K); + } + np = of_find_compatible_node(NULL, NULL, "fsl,lpm-sram"); + if (of_get_property(np, "fsl,enable-lpsr", NULL)) + lpsr_enabled = true; + + if (psci_ops.cpu_suspend) + lpsr_enabled = false; + + if (lpsr_enabled) { + pr_info("LPSR mode enabled, DSM will go into LPSR mode!\n"); + lpm_ocram_base = of_iomap(np, 0); + WARN_ON(!lpm_ocram_base); + WARN_ON(of_address_to_resource(np, 0, &res)); + lpm_ocram_size = resource_size(&res); + lpm_ocram_saved_in_ddr = kzalloc(lpm_ocram_size, GFP_KERNEL); + WARN_ON(!lpm_ocram_saved_in_ddr); + + np = of_find_node_by_path( + "/soc/aips-bus@30000000/iomuxc@30330000"); + if (np) + iomuxc_base = of_iomap(np, 0); + WARN_ON(!iomuxc_base); + + np = of_find_node_by_path( + "/soc/aips-bus@30000000/gpt@302d0000"); + if (np) + gpt1_base = of_iomap(np, 0); + WARN_ON(!gpt1_base); + + np = of_find_node_by_path( + "/soc/aips-bus@30400000/system-counter-cmp@306b0000"); + if (np) + system_counter_cmp_base = of_iomap(np, 0); + WARN_ON(!system_counter_cmp_base); + + np = of_find_node_by_path( + "/soc/aips-bus@30000000/gpio@30200000"); + if (np) + gpio1_base = of_iomap(np, 0); + WARN_ON(!gpio1_base); + } + + np = of_find_node_by_path( + "/soc/aips-bus@30400000/system-counter-ctrl@306c0000"); + if (np) + system_counter_ctrl_base = of_iomap(np, 0); + WARN_ON(!system_counter_ctrl_base); + + if (imx_ddrc_get_ddr_type() == IMX_DDR_TYPE_LPDDR3 + || imx_ddrc_get_ddr_type() == IMX_DDR_TYPE_LPDDR2) + imx7_pm_common_init(&imx7d_pm_data_lpddr3); + else if (imx_ddrc_get_ddr_type() == IMX_DDR_TYPE_DDR3) + imx7_pm_common_init(&imx7d_pm_data_ddr3); + + np = of_find_compatible_node(NULL, NULL, "fsl,mega-fast-sram"); + ocram_base = of_iomap(np, 0); + WARN_ON(!ocram_base); + WARN_ON(of_address_to_resource(np, 0, &res)); + ocram_size = resource_size(&res); + ocram_saved_in_ddr = kzalloc(ocram_size, GFP_KERNEL); + WARN_ON(!ocram_saved_in_ddr); + + np = of_find_node_by_path( + "/soc/aips-bus@30800000/serial@30860000"); + if (np) + console_base = of_iomap(np, 0); + + /* clear LPSR resume address first */ + imx7_pm_set_lpsr_resume_addr(0); +} diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c index 60dad38d02f7..399771e2912e 100644 --- a/arch/arm/mach-imx/src.c +++ b/arch/arm/mach-imx/src.c @@ -24,9 +24,17 @@ #define BP_SRC_SCR_SW_IPU2_RST 12 #define BP_SRC_SCR_CORE1_RST 14 #define BP_SRC_SCR_CORE1_ENABLE 22 +/* below is for i.MX7D */ +#define SRC_GPR1_V2 0x074 +#define SRC_A7RCR0 0x004 +#define SRC_A7RCR1 0x008 +#define SRC_M4RCR 0x00C + +#define BP_SRC_A7RCR0_A7_CORE_RESET0 0 +#define BP_SRC_A7RCR1_A7_CORE1_ENABLE 1 static void __iomem *src_base; -static DEFINE_SPINLOCK(scr_lock); +static DEFINE_SPINLOCK(src_lock); static bool m4_is_enabled; static const int sw_reset_bits[5] = { @@ -58,11 
+66,11 @@ static int imx_src_reset_module(struct reset_controller_dev *rcdev, bit = 1 << sw_reset_bits[sw_reset_idx]; - spin_lock_irqsave(&scr_lock, flags); + spin_lock_irqsave(&src_lock, flags); val = readl_relaxed(src_base + SRC_SCR); val |= bit; writel_relaxed(val, src_base + SRC_SCR); - spin_unlock_irqrestore(&scr_lock, flags); + spin_unlock_irqrestore(&src_lock, flags); timeout = jiffies + msecs_to_jiffies(1000); while (readl(src_base + SRC_SCR) & bit) { @@ -88,32 +96,59 @@ void imx_enable_cpu(int cpu, bool enable) u32 mask, val; cpu = cpu_logical_map(cpu); - mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1); - spin_lock(&scr_lock); - val = readl_relaxed(src_base + SRC_SCR); - val = enable ? val | mask : val & ~mask; - val |= 1 << (BP_SRC_SCR_CORE1_RST + cpu - 1); - writel_relaxed(val, src_base + SRC_SCR); - spin_unlock(&scr_lock); + spin_lock(&src_lock); + if (cpu_is_imx7d()) { + /* enable core */ + if (enable) + imx_gpcv2_set_core1_pdn_pup_by_software(false); + + mask = 1 << (BP_SRC_A7RCR1_A7_CORE1_ENABLE + cpu - 1); + val = readl_relaxed(src_base + SRC_A7RCR1); + val = enable ? val | mask : val & ~mask; + writel_relaxed(val, src_base + SRC_A7RCR1); + } else { + mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1); + val = readl_relaxed(src_base + SRC_SCR); + val = enable ? val | mask : val & ~mask; + val |= 1 << (BP_SRC_SCR_CORE1_RST + cpu - 1); + writel_relaxed(val, src_base + SRC_SCR); + } + spin_unlock(&src_lock); } void imx_set_cpu_jump(int cpu, void *jump_addr) { + spin_lock(&src_lock); cpu = cpu_logical_map(cpu); - writel_relaxed(__pa_symbol(jump_addr), - src_base + SRC_GPR1 + cpu * 8); + if (cpu_is_imx7d()) + writel_relaxed(__pa_symbol(jump_addr), + src_base + SRC_GPR1_V2 + cpu * 8); + else + writel_relaxed(__pa_symbol(jump_addr), + src_base + SRC_GPR1 + cpu * 8); + spin_unlock(&src_lock); } u32 imx_get_cpu_arg(int cpu) { cpu = cpu_logical_map(cpu); - return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4); + if (cpu_is_imx7d()) + return readl_relaxed(src_base + SRC_GPR1_V2 + + cpu * 8 + 4); + else + return readl_relaxed(src_base + SRC_GPR1 + + cpu * 8 + 4); } void imx_set_cpu_arg(int cpu, u32 arg) { cpu = cpu_logical_map(cpu); - writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4); + if (cpu_is_imx7d()) + writel_relaxed(arg, src_base + SRC_GPR1_V2 + + cpu * 8 + 4); + else + writel_relaxed(arg, src_base + SRC_GPR1 + + cpu * 8 + 4); } void __init imx_src_init(void) @@ -127,6 +162,15 @@ void __init imx_src_init(void) src_base = of_iomap(np, 0); WARN_ON(!src_base); + if (cpu_is_imx7d()) { + val = readl_relaxed(src_base + SRC_M4RCR); + if (((val & BIT(3)) == BIT(3)) && !(val & BIT(0))) + m4_is_enabled = true; + else + m4_is_enabled = false; + return; + } + imx_reset_controller.of_node = np; if (IS_ENABLED(CONFIG_RESET_CONTROLLER)) reset_controller_register(&imx_reset_controller); @@ -135,7 +179,7 @@ void __init imx_src_init(void) * force warm reset sources to generate cold reset * for a more reliable restart */ - spin_lock(&scr_lock); + spin_lock(&src_lock); val = readl_relaxed(src_base + SRC_SCR); /* bit 4 is m4c_non_sclr_rst on i.MX6SX */ @@ -147,5 +191,5 @@ void __init imx_src_init(void) val &= ~(1 << BP_SRC_SCR_WARM_RESET_ENABLE); writel_relaxed(val, src_base + SRC_SCR); - spin_unlock(&scr_lock); + spin_unlock(&src_lock); } diff --git a/arch/arm/mach-imx/suspend-imx7.S b/arch/arm/mach-imx/suspend-imx7.S new file mode 100644 index 000000000000..5f4e31152a69 --- /dev/null +++ b/arch/arm/mach-imx/suspend-imx7.S @@ -0,0 +1,714 @@ +/* + * Copyright (C) 2015 Freescale Semiconductor, 
Inc. + * + * The code contained herein is licensed under the GNU General Public + * License. You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +#include +#include +#include "hardware.h" + +/* + * ==================== low level suspend ==================== + * + * Better to follow below rules to use ARM registers: + * r0: pm_info structure address; + * r1 ~ r4: for saving pm_info members; + * r5 ~ r10: free registers; + * r11: io base address. + * + * suspend ocram space layout: + * ======================== high address ====================== + * . + * . + * . + * ^ + * ^ + * ^ + * imx7_suspend code + * PM_INFO structure(imx7_cpu_pm_info) + * ======================== low address ======================= + */ + +/* + * Below offsets are based on struct imx7_cpu_pm_info + * which defined in arch/arm/mach-imx/pm-imx7.c, this + * structure contains necessary pm info for low level + * suspend related code. + */ +#define PM_INFO_M4_RESERVE0_OFFSET 0x0 +#define PM_INFO_M4_RESERVE1_OFFSET 0x4 +#define PM_INFO_M4_RESERVE2_OFFSET 0x8 +#define PM_INFO_PBASE_OFFSET 0xc +#define PM_INFO_RESUME_ADDR_OFFSET 0x10 +#define PM_INFO_DDR_TYPE_OFFSET 0x14 +#define PM_INFO_PM_INFO_SIZE_OFFSET 0x18 +#define PM_INFO_MX7_DDRC_P_OFFSET 0x1c +#define PM_INFO_MX7_DDRC_V_OFFSET 0x20 +#define PM_INFO_MX7_DDRC_PHY_P_OFFSET 0x24 +#define PM_INFO_MX7_DDRC_PHY_V_OFFSET 0x28 +#define PM_INFO_MX7_SRC_P_OFFSET 0x2c +#define PM_INFO_MX7_SRC_V_OFFSET 0x30 +#define PM_INFO_MX7_IOMUXC_GPR_P_OFFSET 0x34 +#define PM_INFO_MX7_IOMUXC_GPR_V_OFFSET 0x38 +#define PM_INFO_MX7_CCM_P_OFFSET 0x3c +#define PM_INFO_MX7_CCM_V_OFFSET 0x40 +#define PM_INFO_MX7_GPC_P_OFFSET 0x44 +#define PM_INFO_MX7_GPC_V_OFFSET 0x48 +#define PM_INFO_MX7_SNVS_P_OFFSET 0x4c +#define PM_INFO_MX7_SNVS_V_OFFSET 0x50 +#define PM_INFO_MX7_ANATOP_P_OFFSET 0x54 +#define PM_INFO_MX7_ANATOP_V_OFFSET 0x58 +#define PM_INFO_MX7_LPSR_P_OFFSET 0x5c +#define PM_INFO_MX7_LPSR_V_OFFSET 0x60 +#define PM_INFO_MX7_GIC_DIST_P_OFFSET 0x64 +#define PM_INFO_MX7_GIC_DIST_V_OFFSET 0x68 +#define PM_INFO_MX7_TTBR1_V_OFFSET 0x6c +#define PM_INFO_DDRC_REG_NUM_OFFSET 0x70 +#define PM_INFO_DDRC_REG_OFFSET 0x74 +#define PM_INFO_DDRC_VALUE_OFFSET 0x78 +#define PM_INFO_DDRC_PHY_REG_NUM_OFFSET 0x174 +#define PM_INFO_DDRC_PHY_REG_OFFSET 0x178 +#define PM_INFO_DDRC_PHY_VALUE_OFFSET 0x17c + +#define MX7_SRC_GPR1 0x74 +#define MX7_SRC_GPR2 0x78 +#define GPC_PGC_C0 0x800 +#define GPC_PGC_FM 0xa00 +#define ANADIG_SNVS_MISC_CTRL 0x380 +#define ANADIG_SNVS_MISC_CTRL_SET 0x384 +#define ANADIG_SNVS_MISC_CTRL_CLR 0x388 +#define ANADIG_DIGPROG 0x800 +#define DDRC_STAT 0x4 +#define DDRC_PWRCTL 0x30 +#define DDRC_PSTAT 0x3fc +#define DDRC_PCTRL_0 0x490 +#define DDRC_DFIMISC 0x1b0 +#define DDRC_SWCTL 0x320 +#define DDRC_SWSTAT 0x324 +#define DDRPHY_LP_CON0 0x18 + +#define CCM_SNVS_LPCG 0x250 +#define MX7D_GPC_IMR1 0x30 +#define MX7D_GPC_IMR2 0x34 +#define MX7D_GPC_IMR3 0x38 +#define MX7D_GPC_IMR4 0x3c + + .align 3 + + .macro disable_l1_dcache + + /* + * Flush all data from the L1 data cache before disabling + * SCTLR.C bit. 
+ */ + push {r0 - r10, lr} + ldr r7, =v7_flush_dcache_all + mov lr, pc + mov pc, r7 + pop {r0 - r10, lr} + + /* disable d-cache */ + mrc p15, 0, r7, c1, c0, 0 + bic r7, r7, #(1 << 2) + mcr p15, 0, r7, c1, c0, 0 + dsb + isb + + push {r0 - r10, lr} + ldr r7, =v7_flush_dcache_all + mov lr, pc + mov pc, r7 + pop {r0 - r10, lr} + + .endm + + .macro store_ttbr1 + + /* Store TTBR1 to pm_info->ttbr1 */ + mrc p15, 0, r7, c2, c0, 1 + str r7, [r0, #PM_INFO_MX7_TTBR1_V_OFFSET] + + /* Disable Branch Prediction, Z bit in SCTLR. */ + mrc p15, 0, r6, c1, c0, 0 + bic r6, r6, #0x800 + mcr p15, 0, r6, c1, c0, 0 + + /* Flush the BTAC. */ + ldr r6, =0x0 + mcr p15, 0, r6, c7, c1, 6 + + ldr r6, =iram_tlb_phys_addr + ldr r6, [r6] + dsb + isb + + /* Store the IRAM table in TTBR1 */ + mcr p15, 0, r6, c2, c0, 1 + /* Read TTBCR and set PD0=1, N = 1 */ + mrc p15, 0, r6, c2, c0, 2 + orr r6, r6, #0x11 + mcr p15, 0, r6, c2, c0, 2 + + dsb + isb + + /* flush the TLB */ + ldr r6, =0x0 + mcr p15, 0, r6, c8, c3, 0 + + .endm + + .macro restore_ttbr1 + + /* Enable L1 data cache. */ + mrc p15, 0, r6, c1, c0, 0 + orr r6, r6, #0x4 + mcr p15, 0, r6, c1, c0, 0 + + dsb + isb + + /* Restore TTBCR */ + /* Read TTBCR and set PD0=0, N = 0 */ + mrc p15, 0, r6, c2, c0, 2 + bic r6, r6, #0x11 + mcr p15, 0, r6, c2, c0, 2 + dsb + isb + + /* flush the TLB */ + ldr r6, =0x0 + mcr p15, 0, r6, c8, c3, 0 + + /* Enable Branch Prediction, Z bit in SCTLR. */ + mrc p15, 0, r6, c1, c0, 0 + orr r6, r6, #0x800 + mcr p15, 0, r6, c1, c0, 0 + + /* Flush the Branch Target Address Cache (BTAC) */ + ldr r6, =0x0 + mcr p15, 0, r6, c7, c1, 6 + + /* Restore TTBR1, get the origin ttbr1 from pm info */ + ldr r7, [r0, #PM_INFO_MX7_TTBR1_V_OFFSET] + mcr p15, 0, r7, c2, c0, 1 + + .endm + + .macro ddrc_enter_self_refresh + + ldr r11, [r0, #PM_INFO_MX7_DDRC_V_OFFSET] + + /* let DDR out of self-refresh */ + ldr r7, =0x0 + str r7, [r11, #DDRC_PWRCTL] + + /* wait rw port_busy clear */ + ldr r6, =(0x1 << 16) + orr r6, r6, #0x1 +1: + ldr r7, [r11, #DDRC_PSTAT] + ands r7, r7, r6 + bne 1b + + /* enter self-refresh bit 5 */ + ldr r7, =(0x1 << 5) + str r7, [r11, #DDRC_PWRCTL] + + /* wait until self-refresh mode entered */ +2: + ldr r7, [r11, #DDRC_STAT] + and r7, r7, #0x3 + cmp r7, #0x3 + bne 2b +3: + ldr r7, [r11, #DDRC_STAT] + ands r7, r7, #0x20 + beq 3b + + /* disable dram clk */ + ldr r7, [r11, #DDRC_PWRCTL] + orr r7, r7, #(1 << 3) + str r7, [r11, #DDRC_PWRCTL] + + .endm + + .macro ddrc_exit_self_refresh + + cmp r5, #0x0 + ldreq r11, [r0, #PM_INFO_MX7_DDRC_V_OFFSET] + ldrne r11, [r0, #PM_INFO_MX7_DDRC_P_OFFSET] + + /* let DDR out of self-refresh */ + ldr r7, =0x0 + str r7, [r11, #DDRC_PWRCTL] + + /* wait until self-refresh mode entered */ +4: + ldr r7, [r11, #DDRC_STAT] + and r7, r7, #0x3 + cmp r7, #0x3 + beq 4b + + /* enable auto self-refresh */ + ldr r7, [r11, #DDRC_PWRCTL] + orr r7, r7, #(1 << 0) + str r7, [r11, #DDRC_PWRCTL] + + .endm + + .macro wait_delay +5: + subs r6, r6, #0x1 + bne 5b + + .endm + + .macro ddr_enter_retention + + ldr r11, [r0, #PM_INFO_MX7_DDRC_V_OFFSET] + + /* let DDR out of self-refresh */ + ldr r7, =0x0 + str r7, [r11, #DDRC_PCTRL_0] + + /* wait rw port_busy clear */ + ldr r6, =(0x1 << 16) + orr r6, r6, #0x1 +6: + ldr r7, [r11, #DDRC_PSTAT] + ands r7, r7, r6 + bne 6b + + ldr r11, [r0, #PM_INFO_MX7_DDRC_V_OFFSET] + /* enter self-refresh bit 5 */ + ldr r7, =(0x1 << 5) + str r7, [r11, #DDRC_PWRCTL] + + /* wait until self-refresh mode entered */ +7: + ldr r7, [r11, #DDRC_STAT] + and r7, r7, #0x3 + cmp r7, #0x3 + bne 7b +8: + ldr r7, [r11, #DDRC_STAT] + 
ands r7, r7, #0x20 + beq 8b + + /* disable dram clk */ + ldr r7, =(0x1 << 5) + orr r7, r7, #(1 << 3) + str r7, [r11, #DDRC_PWRCTL] + + ldr r11, [r0, #PM_INFO_MX7_ANATOP_V_OFFSET] + ldr r7, [r11, #ANADIG_DIGPROG] + and r7, r7, #0xff + cmp r7, #0x11 + bne 10f + + /* TO 1.1 */ + ldr r11, [r0, #PM_INFO_MX7_IOMUXC_GPR_V_OFFSET] + ldr r7, =0x38000000 + str r7, [r11] + + /* LPSR mode need to use TO1.0 flow as IOMUX lost power */ + ldr r10, [r0, #PM_INFO_MX7_LPSR_V_OFFSET] + ldr r7, [r10] + cmp r7, #0x0 + beq 11f +10: + /* reset ddr_phy */ + ldr r11, [r0, #PM_INFO_MX7_ANATOP_V_OFFSET] + ldr r7, =0x0 + str r7, [r11, #ANADIG_SNVS_MISC_CTRL] + + /* delay 7 us */ + ldr r6, =6000 + wait_delay + + ldr r11, [r0, #PM_INFO_MX7_SRC_V_OFFSET] + ldr r6, =0x1000 + ldr r7, [r11, r6] + orr r7, r7, #0x1 + str r7, [r11, r6] +11: + /* turn off ddr power */ + ldr r11, [r0, #PM_INFO_MX7_ANATOP_V_OFFSET] + ldr r7, =(0x1 << 29) + str r7, [r11, #ANADIG_SNVS_MISC_CTRL_SET] + + ldr r11, [r0, #PM_INFO_MX7_SRC_V_OFFSET] + ldr r6, =0x1000 + ldr r7, [r11, r6] + orr r7, r7, #0x1 + str r7, [r11, r6] + + .endm + + .macro ddr_exit_retention + + cmp r5, #0x0 + ldreq r1, [r0, #PM_INFO_MX7_ANATOP_V_OFFSET] + ldrne r1, [r0, #PM_INFO_MX7_ANATOP_P_OFFSET] + ldreq r2, [r0, #PM_INFO_MX7_SRC_V_OFFSET] + ldrne r2, [r0, #PM_INFO_MX7_SRC_P_OFFSET] + ldreq r3, [r0, #PM_INFO_MX7_DDRC_V_OFFSET] + ldrne r3, [r0, #PM_INFO_MX7_DDRC_P_OFFSET] + ldreq r4, [r0, #PM_INFO_MX7_DDRC_PHY_V_OFFSET] + ldrne r4, [r0, #PM_INFO_MX7_DDRC_PHY_P_OFFSET] + ldreq r10, [r0, #PM_INFO_MX7_CCM_V_OFFSET] + ldrne r10, [r0, #PM_INFO_MX7_CCM_P_OFFSET] + ldreq r11, [r0, #PM_INFO_MX7_IOMUXC_GPR_V_OFFSET] + ldrne r11, [r0, #PM_INFO_MX7_IOMUXC_GPR_P_OFFSET] + + /* turn on ddr power */ + ldr r7, =(0x1 << 29) + str r7, [r1, #ANADIG_SNVS_MISC_CTRL_CLR] + + ldr r6, =50 + wait_delay + + /* clear ddr_phy reset */ + ldr r6, =0x1000 + ldr r7, [r2, r6] + orr r7, r7, #0x3 + str r7, [r2, r6] + ldr r7, [r2, r6] + bic r7, r7, #0x1 + str r7, [r2, r6] +13: + ldr r6, [r0, #PM_INFO_DDRC_REG_NUM_OFFSET] + ldr r7, =PM_INFO_DDRC_REG_OFFSET + add r7, r7, r0 +14: + ldr r8, [r7], #0x4 + ldr r9, [r7], #0x4 + str r9, [r3, r8] + subs r6, r6, #0x1 + bne 14b + ldr r7, =0x20 + str r7, [r3, #DDRC_PWRCTL] + ldr r7, =0x0 + str r7, [r3, #DDRC_DFIMISC] + + /* do PHY, clear ddr_phy reset */ + ldr r6, =0x1000 + ldr r7, [r2, r6] + bic r7, r7, #0x2 + str r7, [r2, r6] + + ldr r7, [r1, #ANADIG_DIGPROG] + and r7, r7, #0xff + cmp r7, #0x11 + bne 12f + + /* + * TKT262940: + * System hang when press RST for DDR PAD is + * in retention mode, fixed on TO1.1 + */ + ldr r7, [r11] + bic r7, r7, #(1 << 27) + str r7, [r11] + ldr r7, [r11] + bic r7, r7, #(1 << 29) + str r7, [r11] +12: + ldr r7, =(0x1 << 30) + str r7, [r1, #ANADIG_SNVS_MISC_CTRL_SET] + + /* need to delay ~5mS */ + ldr r6, =0x100000 + wait_delay + + ldr r6, [r0, #PM_INFO_DDRC_PHY_REG_NUM_OFFSET] + ldr r7, =PM_INFO_DDRC_PHY_REG_OFFSET + add r7, r7, r0 + +15: + ldr r8, [r7], #0x4 + ldr r9, [r7], #0x4 + str r9, [r4, r8] + subs r6, r6, #0x1 + bne 15b + + ldr r7, =0x0 + add r9, r10, #0x4000 + str r7, [r9, #0x130] + + ldr r7, =0x170 + orr r7, r7, #0x8 + str r7, [r11, #0x20] + + ldr r7, =0x2 + add r9, r10, #0x4000 + str r7, [r9, #0x130] + + ldr r7, =0xf + str r7, [r4, #DDRPHY_LP_CON0] + + /* wait until self-refresh mode entered */ +16: + ldr r7, [r3, #DDRC_STAT] + and r7, r7, #0x3 + cmp r7, #0x3 + bne 16b + ldr r7, =0x0 + str r7, [r3, #DDRC_SWCTL] + ldr r7, =0x1 + str r7, [r3, #DDRC_DFIMISC] + ldr r7, =0x1 + str r7, [r3, #DDRC_SWCTL] +17: + ldr r7, [r3, #DDRC_SWSTAT] + 
and r7, r7, #0x1 + cmp r7, #0x1 + bne 17b +18: + ldr r7, [r3, #DDRC_STAT] + and r7, r7, #0x20 + cmp r7, #0x20 + bne 18b + + /* let DDR out of self-refresh */ + ldr r7, =0x0 + str r7, [r3, #DDRC_PWRCTL] +19: + ldr r7, [r3, #DDRC_STAT] + and r7, r7, #0x30 + cmp r7, #0x0 + bne 19b + +20: + ldr r7, [r3, #DDRC_STAT] + and r7, r7, #0x3 + cmp r7, #0x1 + bne 20b + + /* enable port */ + ldr r7, =0x1 + str r7, [r3, #DDRC_PCTRL_0] + + /* enable auto self-refresh */ + ldr r7, [r3, #DDRC_PWRCTL] + orr r7, r7, #(1 << 0) + str r7, [r3, #DDRC_PWRCTL] + + .endm + +ENTRY(imx7_suspend) + push {r4-r12} + + /* make sure SNVS clk is enabled */ + ldr r11, [r0, #PM_INFO_MX7_CCM_V_OFFSET] + add r11, r11, #0x4000 + ldr r7, =0x3 + str r7, [r11, #CCM_SNVS_LPCG] + + /* check whether it is a standby mode */ + ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFFSET] + ldr r7, [r11, #GPC_PGC_C0] + cmp r7, #0 + beq ddr_only_self_refresh + + /* + * The value of r0 is mapped the same in origin table and IRAM table, + * thus no need to care r0 here. + */ + ldr r1, [r0, #PM_INFO_PBASE_OFFSET] + ldr r2, [r0, #PM_INFO_RESUME_ADDR_OFFSET] + ldr r3, [r0, #PM_INFO_DDR_TYPE_OFFSET] + ldr r4, [r0, #PM_INFO_PM_INFO_SIZE_OFFSET] + + /* + * counting the resume address in iram + * to set it in SRC register. + */ + ldr r6, =imx7_suspend + ldr r7, =resume + sub r7, r7, r6 + add r8, r1, r4 + add r9, r8, r7 + + ldr r11, [r0, #PM_INFO_MX7_SRC_V_OFFSET] + /* store physical resume addr and pm_info address. */ + str r9, [r11, #MX7_SRC_GPR1] + str r1, [r11, #MX7_SRC_GPR2] + + disable_l1_dcache + + store_ttbr1 + + ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFFSET] + ldr r7, [r11, #GPC_PGC_FM] + cmp r7, #0 + beq ddr_only_self_refresh + + ddr_enter_retention + /* enter LPSR mode if resume addr is valid */ + ldr r11, [r0, #PM_INFO_MX7_LPSR_V_OFFSET] + ldr r7, [r11] + cmp r7, #0x0 + beq ddr_retention_enter_out + + /* disable STOP mode before entering LPSR */ + ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFFSET] + ldr r7, [r11] + bic r7, #0xf + str r7, [r11] + + /* shut down vddsoc to enter lpsr mode */ + ldr r11, [r0, #PM_INFO_MX7_SNVS_V_OFFSET] + ldr r7, [r11, #0x38] + orr r7, r7, #0x60 + str r7, [r11, #0x38] +wait_shutdown: + wfi + nop + nop + nop + nop + b wait_shutdown + +ddr_only_self_refresh: + ddrc_enter_self_refresh + b wfi +ddr_retention_enter_out: + + ldr r11, [r0, #PM_INFO_MX7_GIC_DIST_V_OFFSET] + ldr r7, =0x0 + ldr r8, =0x1000 + str r7, [r11, r8] + + ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFFSET] + ldr r4, [r11, #MX7D_GPC_IMR1] + ldr r5, [r11, #MX7D_GPC_IMR2] + ldr r6, [r11, #MX7D_GPC_IMR3] + ldr r7, [r11, #MX7D_GPC_IMR4] + + ldr r8, =0xffffffff + str r8, [r11, #MX7D_GPC_IMR1] + str r8, [r11, #MX7D_GPC_IMR2] + str r8, [r11, #MX7D_GPC_IMR3] + str r8, [r11, #MX7D_GPC_IMR4] + + /* + * enable the RBC bypass counter here + * to hold off the interrupts. RBC counter + * = 8 (240us). With this setting, the latency + * from wakeup interrupt to ARM power up + * is ~250uS. + */ + ldr r8, [r11, #0x14] + bic r8, r8, #(0x3f << 24) + orr r8, r8, #(0x8 << 24) + str r8, [r11, #0x14] + + /* enable the counter. */ + ldr r8, [r11, #0x14] + orr r8, r8, #(0x1 << 30) + str r8, [r11, #0x14] + + /* unmask all the GPC interrupts. */ + str r4, [r11, #MX7D_GPC_IMR1] + str r5, [r11, #MX7D_GPC_IMR2] + str r6, [r11, #MX7D_GPC_IMR3] + str r7, [r11, #MX7D_GPC_IMR4] + + /* + * now delay for a short while (3usec) + * ARM is at 1GHz at this point + * so a short loop should be enough. 
+ * this delay is required to ensure that + * the RBC counter can start counting in + * case an interrupt is already pending + * or in case an interrupt arrives just + * as ARM is about to assert DSM_request. + */ + ldr r7, =2000 +rbc_loop: + subs r7, r7, #0x1 + bne rbc_loop +wfi: + /* Zzz, enter stop mode */ + wfi + nop + nop + nop + nop + + mov r5, #0x0 + + ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFFSET] + ldr r7, [r11, #GPC_PGC_FM] + cmp r7, #0 + beq wfi_ddr_self_refresh_out + + ddr_exit_retention + b wfi_ddr_retention_out +wfi_ddr_self_refresh_out: + ddrc_exit_self_refresh +wfi_ddr_retention_out: + + /* check whether it is a standby mode */ + ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFFSET] + ldr r7, [r11, #GPC_PGC_C0] + cmp r7, #0 + beq standby_out + + ldr r11, [r0, #PM_INFO_MX7_GIC_DIST_V_OFFSET] + ldr r7, =0x1 + ldr r8, =0x1000 + str r7, [r11, r8] + + restore_ttbr1 +standby_out: + pop {r4-r12} + /* return to suspend finish */ + mov pc, lr + +resume: + /* invalidate L1 I-cache first */ + mov r6, #0x0 + mcr p15, 0, r6, c7, c5, 0 + mcr p15, 0, r6, c7, c5, 6 + /* enable the Icache and branch prediction */ + mov r6, #0x1800 + mcr p15, 0, r6, c1, c0, 0 + isb + + /* get physical resume address from pm_info. */ + ldr lr, [r0, #PM_INFO_RESUME_ADDR_OFFSET] + /* clear core0's entry and parameter */ + ldr r11, [r0, #PM_INFO_MX7_SRC_P_OFFSET] + mov r7, #0x0 + str r7, [r11, #MX7_SRC_GPR1] + str r7, [r11, #MX7_SRC_GPR2] + + mov r5, #0x1 + + ldr r11, [r0, #PM_INFO_MX7_GPC_P_OFFSET] + ldr r7, [r11, #GPC_PGC_FM] + cmp r7, #0 + beq dsm_ddr_self_refresh_out + + ddr_exit_retention + b dsm_ddr_retention_out +dsm_ddr_self_refresh_out: + ddrc_exit_self_refresh +dsm_ddr_retention_out: + + mov pc, lr +ENDPROC(imx7_suspend) + +ENTRY(ca7_cpu_resume) + bl v7_invalidate_l1 + b cpu_resume +ENDPROC(ca7_cpu_resume)
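
The hand-coded PM_INFO_* offsets in suspend-imx7.S have to track struct imx7_cpu_pm_info in pm-imx7.c exactly, as the comment above the structure warns. As a reading aid only, the sketch below spells that mapping out as compile-time checks; imx7_pm_info_layout_check() is an invented name, it is not part of the patch, and it assumes a 32-bit phys_addr_t (no LPAE), which is what the hand-written offsets already imply.

#include <linux/bug.h>
#include <linux/stddef.h>

/* Sketch only: assert the struct layout that the .S offsets depend on. */
static inline void imx7_pm_info_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct imx7_cpu_pm_info, pbase)        != 0x0c);
	BUILD_BUG_ON(offsetof(struct imx7_cpu_pm_info, resume_addr)  != 0x10);
	BUILD_BUG_ON(offsetof(struct imx7_cpu_pm_info, pm_info_size) != 0x18);
	BUILD_BUG_ON(offsetof(struct imx7_cpu_pm_info, ddrc_base)    != 0x1c);
	BUILD_BUG_ON(offsetof(struct imx7_cpu_pm_info, gic_base)     != 0x64);
	BUILD_BUG_ON(offsetof(struct imx7_cpu_pm_info, ttbr1)        != 0x6c);
	BUILD_BUG_ON(offsetof(struct imx7_cpu_pm_info, ddrc_num)     != 0x70);
	BUILD_BUG_ON(offsetof(struct imx7_cpu_pm_info, ddrc_val)     != 0x74);
	BUILD_BUG_ON(offsetof(struct imx7_cpu_pm_info, ddrc_phy_num) != 0x174);
	BUILD_BUG_ON(offsetof(struct imx7_cpu_pm_info, ddrc_phy_val) != 0x178);
}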
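
Both the cpuidle assembly and imx7_suspend program the GPC register at offset 0x14 the same way around the IMR mask/unmask dance: clear the 6-bit count field in bits [29:24], write a count of 8, then set the enable bit 30. The "240us" in the comments is consistent with the counter ticking on the 32.768 kHz clock (8 / 32768 s is roughly 244 us); the clock source and the SLPCR register name are my assumptions from reference-manual naming, not something the patch states. A rough C rendering, folded into a single read-modify-write for brevity:

#include <linux/bitops.h>
#include <linux/io.h>

#define GPC_SLPCR			0x14	/* assumed name for the 0x14 register */
#define GPC_SLPCR_RBC_COUNT(n)		(((n) & 0x3f) << 24)
#define GPC_SLPCR_RBC_EN		BIT(30)

/* Arm the reset bypass counter for 'ticks' 32 kHz cycles (8 -> ~244 us). */
static void imx7_gpc_arm_rbc(void __iomem *gpc, unsigned int ticks)
{
	u32 val = readl_relaxed(gpc + GPC_SLPCR);

	val &= ~GPC_SLPCR_RBC_COUNT(0x3f);
	val |= GPC_SLPCR_RBC_COUNT(ticks);
	val |= GPC_SLPCR_RBC_EN;
	writel_relaxed(val, gpc + GPC_SLPCR);
}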
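
imx7_suspend_finish() selects between two PSCI power_state parameters. With the field layout from the mainline PSCI 0.2 header (state ID in bits [15:0], state type in bit 16, affinity level in bits [25:24]) the two macros expand to the constants asserted below; the concrete numbers are my reading of <uapi/linux/psci.h>, not something the patch spells out, and the check is purely illustrative.

#include <linux/bug.h>
#include <linux/psci.h>

static inline void imx7_psci_param_check(void)
{
	/* affinity level 1 (cluster), POWER_DOWN state type */
	BUILD_BUG_ON(MX7D_SUSPEND_POWERDWN_PARAM != 0x01010000);
	/* affinity level 1 (cluster), STANDBY state type */
	BUILD_BUG_ON(MX7D_SUSPEND_STANDBY_PARAM != 0x01000000);
}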
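
imx7_ccm_restore() rebuilds each clock root one field at a time, post divider first, then pre divider, then mux, then the enable bit, instead of writing the saved word back in one go, presumably so a root is never enabled while its divider and mux are still at reset values. The loop body is essentially equivalent to calling a helper like the hypothetical one below once per field, in that order (ccm_root_restore_field() is not in the patch):

#include <linux/io.h>

static void ccm_root_restore_field(void __iomem *reg, u32 saved, u32 mask)
{
	u32 val = readl_relaxed(reg);

	val = (val & ~mask) | (saved & mask);
	writel_relaxed(val, reg);
}

/*
 * Order used by the patch for each saved root:
 *   ccm_root_restore_field(root, saved, BM_CCM_ROOT_POST_PODF);
 *   ccm_root_restore_field(root, saved, BM_CCM_ROOT_PRE_PODF);
 *   ccm_root_restore_field(root, saved, BM_CCM_ROOT_MUX);
 *   ccm_root_restore_field(root, saved, BM_CCM_ROOT_ENABLE);
 */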
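
The "only restore output gpio value" step in imx7_gpio_restore() is easy to misread: the saved data register is masked with the saved direction register, so only pins that were configured as outputs and were driven high get their level OR-ed back into DR; input pins, and outputs that were low, keep whatever the hardware shows after resume. In isolation the merge is just the hypothetical helper below:

#include <linux/types.h>

/* Mirrors the DR merge done per GPIO bank in imx7_gpio_restore(). */
static u32 imx7_gpio_merge_dr(u32 cur_dr, u32 saved_dr, u32 saved_gdir)
{
	return cur_dr | (saved_dr & saved_gdir);
}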
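
Finally, the src.c hunks move the i.MX7D boot registers to the second GPR bank: each core owns an 8-byte slot starting at SRC_GPR1_V2 (0x74), with the jump address in the first word and the argument in the second, which is also why suspend-imx7.S stores the resume address at MX7_SRC_GPR1 (0x74) and the pm_info physical address at MX7_SRC_GPR2 (0x78). A small sketch of that addressing, with helper names invented here:

#include <linux/io.h>

static void __iomem *imx7d_src_jump_reg(void __iomem *src, int cpu)
{
	return src + SRC_GPR1_V2 + cpu * 8;	/* GPR1/GPR3: entry point */
}

static void __iomem *imx7d_src_arg_reg(void __iomem *src, int cpu)
{
	return src + SRC_GPR1_V2 + cpu * 8 + 4;	/* GPR2/GPR4: argument */
}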