1
0
Fork 0

ARM: imx: add i.MX7D power management support

This patch adds i.MX7D power management support, including
low power idle, suspend with FastMix off, A7-M4 AMP power
management support.

Signed-off-by: Anson Huang <Anson.Huang@nxp.com>
[ Aisheng: remove duplicated imx7d_init_late definition ]
Signed-off-by: Dong Aisheng <aisheng.dong@nxp.com>
5.4-rM2-2.2.x-imx-squashed
Anson Huang 2019-04-22 09:25:24 +08:00 committed by Dong Aisheng
parent b199c5a470
commit ee891b4c23
14 changed files with 3997 additions and 18 deletions

View File

@ -40,6 +40,10 @@ config HAVE_IMX_GPC
bool
select PM_GENERIC_DOMAINS if PM
config HAVE_IMX_GPCV2
bool
select PM_GENERIC_DOMAINS if PM
config HAVE_IMX_MMDC
bool
@ -571,6 +575,7 @@ config SOC_IMX7D_CA7
select IMX_GPCV2
select HAVE_IMX_DDRC
select HAVE_IMX_MU
select HAVE_IMX_GPCV2
select KEYBOARD_SNVS_PWRKEY
config SOC_IMX7D_CM4

View File

@ -31,6 +31,8 @@ obj-$(CONFIG_SOC_IMX6SX) += cpuidle-imx6sx.o
AFLAGS_imx6sx_low_power_idle.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SOC_IMX6UL) += cpuidle-imx6sx.o
obj-$(CONFIG_SOC_IMX7ULP) += cpuidle-imx7ulp.o
AFLAGS_imx7d_low_power_idle.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SOC_IMX7D_CA7) += cpuidle-imx7d.o imx7d_low_power_idle.o
endif
ifdef CONFIG_SND_SOC_IMX_PCM_FIQ
@ -71,6 +73,7 @@ obj-$(CONFIG_MACH_IMX35_DT) += imx35-dt.o
obj-$(CONFIG_HAVE_IMX_ANATOP) += anatop.o
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_HAVE_IMX_GPCV2) += gpcv2.o
obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o
obj-$(CONFIG_HAVE_IMX_SRC) += src.o
obj-$(CONFIG_HAVE_IMX_DDRC) += ddrc.o
@ -87,7 +90,7 @@ obj-$(CONFIG_SOC_IMX6SLL) += mach-imx6sl.o
obj-$(CONFIG_SOC_IMX6SX) += mach-imx6sx.o ddr3_freq_imx6sx.o smp_wfe_imx6.o lpddr2_freq_imx6sx.o
obj-$(CONFIG_SOC_IMX6UL) += mach-imx6ul.o
obj-$(CONFIG_SOC_IMX7D_CA7) += mach-imx7d.o pm-imx7.o ddr3_freq_imx7d.o smp_wfe.o \
lpddr3_freq_imx.o
lpddr3_freq_imx.o suspend-imx7.o
obj-$(CONFIG_SOC_IMX7D_CM4) += mach-imx7d-cm4.o
obj-$(CONFIG_SOC_IMX7ULP) += mach-imx7ulp.o pm-imx7ulp.o
@ -103,6 +106,7 @@ AFLAGS_ddr3_freq_imx6sx.o :=-Wa,-march=armv7-a
ifeq ($(CONFIG_SUSPEND),y)
AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a
AFLAGS_suspend-imx7.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o
obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o
endif

View File

@ -804,12 +804,16 @@ static int bus_freq_pm_notify(struct notifier_block *nb, unsigned long event,
mutex_lock(&bus_freq_mutex);
if (event == PM_SUSPEND_PREPARE) {
if (cpu_is_imx7d() && imx_src_is_m4_enabled())
imx_mu_lpm_ready(false);
high_bus_count++;
set_high_bus_freq(1);
busfreq_suspended = 1;
} else if (event == PM_POST_SUSPEND) {
busfreq_suspended = 0;
high_bus_count--;
if (cpu_is_imx7d() && imx_src_is_m4_enabled())
imx_mu_lpm_ready(true);
schedule_delayed_work(&bus_freq_daemon,
usecs_to_jiffies(5000000));
}
@ -1057,6 +1061,11 @@ static int busfreq_probe(struct platform_device *pdev)
(clk_get_rate(m4_clk) > LPAPM_CLK))
high_bus_count++;
}
if (cpu_is_imx7d() && imx_src_is_m4_enabled()) {
high_bus_count++;
imx_mu_lpm_ready(true);
}
}
if (err) {

View File

@ -57,9 +57,19 @@ void imx_gpc_set_arm_power_in_lpm(bool power_off);
void imx_gpc_set_l2_mem_power_in_lpm(bool power_off);
void imx_gpc_set_arm_power_up_timing(u32 sw2iso, u32 sw);
void imx_gpc_set_arm_power_down_timing(u32 sw2iso, u32 sw);
void imx_gpcv2_pre_suspend(bool arm_power_off);
void imx_gpcv2_post_resume(void);
unsigned int imx_gpcv2_is_mf_mix_off(void);
void imx_gpcv2_enable_wakeup_for_m4(void);
void imx_gpcv2_disable_wakeup_for_m4(void);
void imx25_pm_init(void);
void imx27_pm_init(void);
void imx5_pmu_init(void);
#ifdef CONFIG_HAVE_IMX_MU
int imx_mu_lpm_ready(bool ready);
#else
static inline int imx_mu_lpm_ready(bool ready) { return 0; }
#endif
enum mxc_cpu_pwr_mode {
WAIT_CLOCKED, /* wfi only */
@ -110,6 +120,10 @@ static inline void imx_gpcv2_add_m4_wake_up_irq(u32 hwirq, bool enable) {}
#endif
void imx_gpc_hold_m4_in_sleep(void);
void imx_gpc_release_m4_in_sleep(void);
void __init imx_gpcv2_check_dt(void);
void imx_gpcv2_set_lpm_mode(enum mxc_cpu_pwr_mode mode);
void imx_gpcv2_set_cpu_power_gate_in_idle(bool pdn);
void imx_gpcv2_enable_rbc(bool enable);
bool imx_mu_is_m4_in_low_freq(void);
bool imx_mu_is_m4_in_stop(void);
void imx_mu_set_m4_run_mode(void);
@ -128,6 +142,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode);
void imx6_set_int_mem_clk_lpm(bool enable);
void imx6sl_set_wait_clk(bool enter);
void imx6sx_low_power_idle(void);
void imx7d_low_power_idle(void);
#ifdef CONFIG_HAVE_IMX_MMDC
int imx_mmdc_get_ddr_type(void);
int imx_mmdc_get_lpddr2_2ch_mode(void);
@ -145,14 +160,18 @@ int imx_cpu_kill(unsigned int cpu);
#ifdef CONFIG_SUSPEND
void v7_cpu_resume(void);
void ca7_cpu_resume(void);
void imx53_suspend(void __iomem *ocram_vbase);
extern const u32 imx53_suspend_sz;
void imx6_suspend(void __iomem *ocram_vbase);
void imx7_suspend(void __iomem *ocram_vbase);
#else
static inline void v7_cpu_resume(void) {}
static inline void ca7_cpu_resume(void) {}
static inline void imx53_suspend(void __iomem *ocram_vbase) {}
static const u32 imx53_suspend_sz;
static inline void imx6_suspend(void __iomem *ocram_vbase) {}
static inline void imx7_suspend(void __iomem *ocram_vbase) {}
#endif
#ifdef CONFIG_HAVE_IMX_DDRC
@ -167,6 +186,7 @@ void imx6dl_pm_init(void);
void imx6sl_pm_init(void);
void imx6sx_pm_init(void);
void imx6ul_pm_init(void);
void imx7d_pm_init(void);
void imx7ulp_pm_init(void);
#ifdef CONFIG_PM

View File

@ -0,0 +1,390 @@
/*
* Copyright (C) 2015 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/busfreq-imx.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/psci.h>
#include <asm/cp15.h>
#include <asm/cpuidle.h>
#include <asm/fncpy.h>
#include <asm/mach/map.h>
#include <asm/proc-fns.h>
#include <asm/suspend.h>
#include <asm/tlb.h>
#include <uapi/linux/psci.h>
#include "common.h"
#include "cpuidle.h"
#include "hardware.h"
#define XTALOSC24M_OSC_CONFIG0 0x10
#define XTALOSC24M_OSC_CONFIG1 0x20
#define XTALOSC24M_OSC_CONFIG2 0x30
#define XTALOSC24M_OSC_CONFIG0_RC_OSC_PROG_CUR_SHIFT 24
#define XTALOSC24M_OSC_CONFIG0_HYST_MINUS_MASK 0xf
#define XTALOSC24M_OSC_CONFIG0_HYST_MINUS_SHIFT 16
#define XTALOSC24M_OSC_CONFIG0_HYST_PLUS_MASK 0xf
#define XTALOSC24M_OSC_CONFIG0_HYST_PLUS_SHIFT 12
#define XTALOSC24M_OSC_CONFIG0_RC_OSC_PROG_SHIFT 4
#define XTALOSC24M_OSC_CONFIG0_ENABLE_SHIFT 1
#define XTALOSC24M_OSC_CONFIG0_START_SHIFT 0
#define XTALOSC24M_OSC_CONFIG1_COUNT_RC_CUR_SHIFT 20
#define XTALOSC24M_OSC_CONFIG1_COUNT_RC_TRG_SHIFT 0
#define XTALOSC24M_OSC_CONFIG2_COUNT_1M_TRG_MASK 0xfff
#define XTALOSC24M_OSC_CONFIG2_COUNT_1M_TRG_SHIFT 0
#define XTALOSC_CTRL_24M 0x0
#define XTALOSC_CTRL_24M_RC_OSC_EN_SHIFT 13
#define REG_SET 0x4
static void __iomem *wfi_iram_base;
static void __iomem *wfi_iram_base_phys;
extern unsigned long iram_tlb_phys_addr;
/* Physical/virtual address pair for one peripheral register block. */
struct imx7_pm_base {
phys_addr_t pbase;
void __iomem *vbase;
};
/*
 * Parameter block placed at the start of the OCRAM area shared with the
 * OCRAM-resident idle code (see imx7d_cpuidle_init()).
 * NOTE(review): field order appears to be consumed by the asm in
 * imx7d_low_power_idle — confirm before reordering fields.
 */
struct imx7_cpuidle_pm_info {
phys_addr_t vbase; /* The virtual address of pm_info. */
phys_addr_t pbase; /* The physical address of pm_info. */
phys_addr_t resume_addr; /* The physical resume address for asm code */
u32 pm_info_size;
u32 ttbr;
u32 num_online_cpus;
u32 num_lpi_cpus;
atomic_t val; /* "turn" variable of the pen lock (see imx_pen_lock) */
atomic_t flag0; /* cpu0 interest flag for the pen lock */
atomic_t flag1; /* cpu1 interest flag for the pen lock */
struct imx7_pm_base ddrc_base;
struct imx7_pm_base ccm_base;
struct imx7_pm_base anatop_base;
struct imx7_pm_base src_base;
struct imx7_pm_base iomuxc_gpr_base;
struct imx7_pm_base gpc_base;
struct imx7_pm_base gic_dist_base;
} __aligned(8);
static atomic_t master_lpi = ATOMIC_INIT(0);
static atomic_t master_wait = ATOMIC_INIT(0);
static void (*imx7d_wfi_in_iram_fn)(void __iomem *iram_vbase);
static struct imx7_cpuidle_pm_info *cpuidle_pm_info;
#define MX7D_POWERDWN_IDLE_PARAM \
((1 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \
(1 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \
(PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT))
#define MX7D_STANDBY_IDLE_PARAM \
((1 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \
(1 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \
(PSCI_POWER_STATE_TYPE_STANDBY << PSCI_0_2_POWER_STATE_TYPE_SHIFT))
/* Mapped for the kernel, unlike cpuidle_pm_info->gic_dist_base.vbase */
static void __iomem *imx7d_cpuidle_gic_base;
/*
 * Two-CPU mutual exclusion (Peterson's algorithm) over the shared
 * flag0/flag1/val fields in OCRAM, used when PSCI is not available.
 * The dsb() calls order the flag/turn stores against the spin reads;
 * a CPU spins while the other CPU has declared interest (flagN == 1)
 * and it is still this CPU's registered turn (val == cpu).
 * Released by imx_pen_unlock().
 */
static void imx_pen_lock(int cpu)
{
if (cpu == 0) {
atomic_set(&cpuidle_pm_info->flag0, 1);
dsb();
atomic_set(&cpuidle_pm_info->val, cpu);
do {
dsb();
} while (atomic_read(&cpuidle_pm_info->flag1) == 1
&& atomic_read(&cpuidle_pm_info->val) == cpu)
;
} else {
atomic_set(&cpuidle_pm_info->flag1, 1);
dsb();
atomic_set(&cpuidle_pm_info->val, cpu);
do {
dsb();
} while (atomic_read(&cpuidle_pm_info->flag0) == 1
&& atomic_read(&cpuidle_pm_info->val) == cpu)
;
}
}
static void imx_pen_unlock(int cpu)
{
dsb();
if (cpu == 0)
atomic_set(&cpuidle_pm_info->flag0, 0);
else
atomic_set(&cpuidle_pm_info->flag1, 0);
}
/*
 * cpu_suspend() finisher for low power idle: enter the state through
 * PSCI firmware when available, otherwise via the WFI routine that was
 * copied into OCRAM at init time. Always reports success.
 */
static int imx7d_idle_finish(unsigned long val)
{
	if (!psci_ops.cpu_suspend) {
		imx7d_wfi_in_iram_fn(wfi_iram_base);
		return 0;
	}

	psci_ops.cpu_suspend(MX7D_POWERDWN_IDLE_PARAM, __pa(cpu_resume));
	return 0;
}
static bool imx7d_gic_sgis_pending(void)
{
void __iomem *sgip_base = imx7d_cpuidle_gic_base + 0x1f20;
return (readl_relaxed(sgip_base + 0x0) |
readl_relaxed(sgip_base + 0x4) |
readl_relaxed(sgip_base + 0x8) |
readl_relaxed(sgip_base + 0xc));
}
static DEFINE_SPINLOCK(psci_lock);
/*
 * cpuidle .enter hook for the WAIT and LOW-POWER-IDLE states.
 *
 * index == 1 (or index == 2 while the bus is not in low-freq mode,
 * which demotes to 1): all-CPU rendezvous via master_wait; the last
 * CPU in switches the GPC to WAIT_UNCLOCKED before plain wfi.
 *
 * index == 2 with bus in low-freq mode: full low power idle. Two
 * implementations:
 *  - PSCI path, serialized by psci_lock, last CPU arms power gating
 *    and enters cluster PM;
 *  - non-PSCI path, serialized by the OCRAM pen lock, with resume via
 *    ca7_cpu_resume for non-last CPUs.
 * Both paths bail out (return -1) when an SGI is already pending,
 * since the GPC cannot wake the cluster on SGIs.
 */
static int imx7d_enter_low_power_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
int mode = get_bus_freq_mode();
if ((index == 1) || ((mode != BUS_FREQ_LOW) && index == 2)) {
index = 1;
/* last CPU to arrive switches the whole cluster to WAIT */
if (atomic_inc_return(&master_wait) == num_online_cpus())
imx_gpcv2_set_lpm_mode(WAIT_UNCLOCKED);
cpu_do_idle();
atomic_dec(&master_wait);
imx_gpcv2_set_lpm_mode(WAIT_CLOCKED);
} else {
if (psci_ops.cpu_suspend) {
cpu_pm_enter();
spin_lock(&psci_lock);
if (atomic_inc_return(&master_lpi) == num_online_cpus()) {
if (imx7d_gic_sgis_pending()) {
atomic_dec(&master_lpi);
index = -1;
goto psci_skip_lpi_flow;
}
imx_gpcv2_set_lpm_mode(WAIT_UNCLOCKED);
imx_gpcv2_set_cpu_power_gate_in_idle(true);
cpu_cluster_pm_enter();
}
spin_unlock(&psci_lock);
cpu_suspend(0, imx7d_idle_finish);
spin_lock(&psci_lock);
/* first CPU back out restores the GPC state */
if (atomic_read(&master_lpi) == num_online_cpus()) {
cpu_cluster_pm_exit();
imx_gpcv2_set_cpu_power_gate_in_idle(false);
imx_gpcv2_set_lpm_mode(WAIT_CLOCKED);
}
atomic_dec(&master_lpi);
psci_skip_lpi_flow:
spin_unlock(&psci_lock);
cpu_pm_exit();
} else {
imx_pen_lock(dev->cpu);
cpuidle_pm_info->num_online_cpus = num_online_cpus();
++cpuidle_pm_info->num_lpi_cpus;
cpu_pm_enter();
if (cpuidle_pm_info->num_lpi_cpus ==
cpuidle_pm_info->num_online_cpus) {
/*
* GPC will not wake on SGIs so check for them
* manually here. At this point we know the other cpu
* is in wfi or waiting for the lock and can't send
* any additional IPIs.
*/
if (imx7d_gic_sgis_pending()) {
index = -1;
goto skip_lpi_flow;
}
imx_gpcv2_set_lpm_mode(WAIT_UNCLOCKED);
imx_gpcv2_set_cpu_power_gate_in_idle(true);
cpu_cluster_pm_enter();
} else {
imx_set_cpu_jump(dev->cpu, ca7_cpu_resume);
}
cpu_suspend(0, imx7d_idle_finish);
if (cpuidle_pm_info->num_lpi_cpus ==
cpuidle_pm_info->num_online_cpus) {
cpu_cluster_pm_exit();
imx_gpcv2_set_cpu_power_gate_in_idle(false);
imx_gpcv2_set_lpm_mode(WAIT_CLOCKED);
}
skip_lpi_flow:
cpu_pm_exit();
--cpuidle_pm_info->num_lpi_cpus;
imx_pen_unlock(dev->cpu);
}
}
return index;
}
/*
 * Three C-states: WFI (arch default), WAIT (cluster clock gated) and
 * LOW-POWER-IDLE (ARM power gated). The last two share
 * imx7d_enter_low_power_idle(), which may demote index 2 to 1 based on
 * the current bus frequency mode. Latencies/residencies are in usecs.
 */
static struct cpuidle_driver imx7d_cpuidle_driver = {
.name = "imx7d_cpuidle",
.owner = THIS_MODULE,
.states = {
/* WFI */
ARM_CPUIDLE_WFI_STATE,
/* WAIT MODE */
{
.exit_latency = 50,
.target_residency = 75,
.flags = CPUIDLE_FLAG_TIMER_STOP,
.enter = imx7d_enter_low_power_idle,
.name = "WAIT",
.desc = "Clock off",
},
/* LOW POWER IDLE */
{
.exit_latency = 10000,
.target_residency = 20000,
.flags = CPUIDLE_FLAG_TIMER_STOP,
.enter = imx7d_enter_low_power_idle,
.name = "LOW-POWER-IDLE",
.desc = "ARM power off",
},
},
.state_count = 3,
.safe_state_index = 0,
};
/*
 * Enable and calibrate the on-chip RC oscillator in the ANATOP block
 * so the SoC can drop the 24 MHz crystal in deep idle. The write
 * sequence and the 4 ms (here 6 ms for margin) settle delay follow the
 * hardware design requirements noted inline. Always returns 0.
 */
int imx7d_enable_rcosc(void)
{
void __iomem *anatop_base =
(void __iomem *)IMX_IO_P2V(MX7D_ANATOP_BASE_ADDR);
u32 val;
imx_gpcv2_set_lpm_mode(WAIT_CLOCKED);
/* set RC-OSC freq and turn it on */
writel_relaxed(0x1 << XTALOSC_CTRL_24M_RC_OSC_EN_SHIFT,
anatop_base + XTALOSC_CTRL_24M + REG_SET);
/*
* config RC-OSC freq
* tune_enable = 1;tune_start = 1;hyst_plus = 0;hyst_minus = 0;
* osc_prog = 0xa7;
*/
writel_relaxed(
0x4 << XTALOSC24M_OSC_CONFIG0_RC_OSC_PROG_CUR_SHIFT |
0xa7 << XTALOSC24M_OSC_CONFIG0_RC_OSC_PROG_SHIFT |
0x1 << XTALOSC24M_OSC_CONFIG0_ENABLE_SHIFT |
0x1 << XTALOSC24M_OSC_CONFIG0_START_SHIFT,
anatop_base + XTALOSC24M_OSC_CONFIG0);
/* set count_trg = 0x2dc */
writel_relaxed(
0x40 << XTALOSC24M_OSC_CONFIG1_COUNT_RC_CUR_SHIFT |
0x2dc << XTALOSC24M_OSC_CONFIG1_COUNT_RC_TRG_SHIFT,
anatop_base + XTALOSC24M_OSC_CONFIG1);
/* wait at least 4ms according to hardware design */
mdelay(6);
/*
* now add some hysteresis, hyst_plus=3, hyst_minus=3
* (the minimum hysteresis that looks good is 2)
*/
val = readl_relaxed(anatop_base + XTALOSC24M_OSC_CONFIG0);
val &= ~((XTALOSC24M_OSC_CONFIG0_HYST_MINUS_MASK <<
XTALOSC24M_OSC_CONFIG0_HYST_MINUS_SHIFT) |
(XTALOSC24M_OSC_CONFIG0_HYST_PLUS_MASK <<
XTALOSC24M_OSC_CONFIG0_HYST_PLUS_SHIFT));
val |= (0x3 << XTALOSC24M_OSC_CONFIG0_HYST_MINUS_SHIFT) |
(0x3 << XTALOSC24M_OSC_CONFIG0_HYST_PLUS_SHIFT);
writel_relaxed(val, anatop_base + XTALOSC24M_OSC_CONFIG0);
/* set the count_1m_trg = 0x2d7 */
val = readl_relaxed(anatop_base + XTALOSC24M_OSC_CONFIG2);
val &= ~(XTALOSC24M_OSC_CONFIG2_COUNT_1M_TRG_MASK <<
XTALOSC24M_OSC_CONFIG2_COUNT_1M_TRG_SHIFT);
val |= 0x2d7 << XTALOSC24M_OSC_CONFIG2_COUNT_1M_TRG_SHIFT;
writel_relaxed(val, anatop_base + XTALOSC24M_OSC_CONFIG2);
/*
* hardware design require to write XTALOSC24M_OSC_CONFIG0 or
* XTALOSC24M_OSC_CONFIG1 to
* make XTALOSC24M_OSC_CONFIG2 write work
*/
val = readl_relaxed(anatop_base + XTALOSC24M_OSC_CONFIG1);
writel_relaxed(val, anatop_base + XTALOSC24M_OSC_CONFIG1);
return 0;
}
/*
 * Set up low power idle support and register the cpuidle driver.
 *
 * Lays out the reserved OCRAM region: the imx7_cpuidle_pm_info block
 * first (FNCPY_ALIGN-aligned), then — only when PSCI is absent — a
 * copy of the imx7d_low_power_idle asm routine placed right after it
 * with fncpy(). Also fills pm_info with the phys/virt bases the asm
 * code needs and enables the RC oscillator.
 *
 * NOTE(review): the ioremap() result is not checked; a NULL
 * imx7d_cpuidle_gic_base would fault later in
 * imx7d_gic_sgis_pending() — confirm whether failure is possible here.
 */
int __init imx7d_cpuidle_init(void)
{
wfi_iram_base_phys = (void *)(iram_tlb_phys_addr +
MX7_CPUIDLE_OCRAM_ADDR_OFFSET);
/* Make sure wfi_iram_base is 8 byte aligned. */
if ((uintptr_t)(wfi_iram_base_phys) & (FNCPY_ALIGN - 1))
wfi_iram_base_phys += FNCPY_ALIGN -
((uintptr_t)wfi_iram_base_phys % (FNCPY_ALIGN));
wfi_iram_base = (void *)IMX_IO_P2V((unsigned long) wfi_iram_base_phys);
cpuidle_pm_info = wfi_iram_base;
cpuidle_pm_info->vbase = (phys_addr_t) wfi_iram_base;
cpuidle_pm_info->pbase = (phys_addr_t) wfi_iram_base_phys;
cpuidle_pm_info->pm_info_size = sizeof(*cpuidle_pm_info);
cpuidle_pm_info->resume_addr = virt_to_phys(ca7_cpu_resume);
cpuidle_pm_info->num_online_cpus = num_online_cpus();
cpuidle_pm_info->ddrc_base.pbase = MX7D_DDRC_BASE_ADDR;
cpuidle_pm_info->ddrc_base.vbase =
(void __iomem *)IMX_IO_P2V(MX7D_DDRC_BASE_ADDR);
cpuidle_pm_info->ccm_base.pbase = MX7D_CCM_BASE_ADDR;
cpuidle_pm_info->ccm_base.vbase =
(void __iomem *)IMX_IO_P2V(MX7D_CCM_BASE_ADDR);
cpuidle_pm_info->anatop_base.pbase = MX7D_ANATOP_BASE_ADDR;
cpuidle_pm_info->anatop_base.vbase =
(void __iomem *)IMX_IO_P2V(MX7D_ANATOP_BASE_ADDR);
cpuidle_pm_info->src_base.pbase = MX7D_SRC_BASE_ADDR;
cpuidle_pm_info->src_base.vbase =
(void __iomem *)IMX_IO_P2V(MX7D_SRC_BASE_ADDR);
cpuidle_pm_info->iomuxc_gpr_base.pbase = MX7D_IOMUXC_GPR_BASE_ADDR;
cpuidle_pm_info->iomuxc_gpr_base.vbase =
(void __iomem *)IMX_IO_P2V(MX7D_IOMUXC_GPR_BASE_ADDR);
cpuidle_pm_info->gpc_base.pbase = MX7D_GPC_BASE_ADDR;
cpuidle_pm_info->gpc_base.vbase =
(void __iomem *)IMX_IO_P2V(MX7D_GPC_BASE_ADDR);
cpuidle_pm_info->gic_dist_base.pbase = MX7D_GIC_BASE_ADDR;
cpuidle_pm_info->gic_dist_base.vbase =
(void __iomem *)IMX_IO_P2V(MX7D_GIC_BASE_ADDR);
imx7d_cpuidle_gic_base = ioremap(MX7D_GIC_BASE_ADDR, MX7D_GIC_SIZE);
imx7d_enable_rcosc();
/* code size should include cpuidle_pm_info size */
if (!psci_ops.cpu_suspend) {
imx7d_wfi_in_iram_fn = (void *)fncpy(wfi_iram_base +
sizeof(*cpuidle_pm_info),
&imx7d_low_power_idle,
MX7_CPUIDLE_OCRAM_SIZE - sizeof(*cpuidle_pm_info));
}
return cpuidle_register(&imx7d_cpuidle_driver, NULL);
}

View File

@ -9,6 +9,8 @@ extern int imx5_cpuidle_init(void);
extern int imx6q_cpuidle_init(void);
extern int imx6sl_cpuidle_init(void);
extern int imx6sx_cpuidle_init(void);
extern int imx7d_cpuidle_init(void);
extern int imx7d_enable_rcosc(void);
extern int imx7ulp_cpuidle_init(void);
#else
static inline int imx5_cpuidle_init(void)
@ -27,6 +29,14 @@ static inline int imx6sx_cpuidle_init(void)
{
return 0;
}
static inline int imx7d_cpuidle_init(void)
{
return 0;
}
static inline int imx7d_enable_rcosc(void)
{
return 0;
}
static inline int imx7ulp_cpuidle_init(void)
{
return 0;

View File

@ -0,0 +1,851 @@
/*
* Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
#include "common.h"
#include "hardware.h"
#define IMR_NUM 4
#define GPC_MAX_IRQS (IMR_NUM * 32)
#define GPC_LPCR_A7_BSC 0x0
#define GPC_LPCR_A7_AD 0x4
#define GPC_LPCR_M4 0x8
#define GPC_SLPCR 0x14
#define GPC_MLPCR 0x20
#define GPC_PGC_ACK_SEL_A7 0x24
#define GPC_MISC 0x2c
#define GPC_IMR1_CORE0 0x30
#define GPC_IMR1_CORE1 0x40
#define GPC_IMR1_M4 0x50
#define GPC_SLOT0_CFG 0xb0
#define GPC_PGC_CPU_MAPPING 0xec
#define GPC_CPU_PGC_SW_PUP_REQ 0xf0
#define GPC_PU_PGC_SW_PUP_REQ 0xf8
#define GPC_CPU_PGC_SW_PDN_REQ 0xfc
#define GPC_PU_PGC_SW_PDN_REQ 0x104
#define GPC_GTOR 0x124
#define GPC_PGC_C0 0x800
#define GPC_PGC_C0_PUPSCR 0x804
#define GPC_PGC_SCU_TIMING 0x890
#define GPC_PGC_C1 0x840
#define GPC_PGC_C1_PUPSCR 0x844
#define GPC_PGC_SCU 0x880
#define GPC_PGC_FM 0xa00
#define BM_LPCR_A7_BSC_IRQ_SRC_A7_WAKEUP 0x70000000
#define BM_LPCR_A7_BSC_CPU_CLK_ON_LPM 0x4000
#define BM_LPCR_A7_BSC_LPM1 0xc
#define BM_LPCR_A7_BSC_LPM0 0x3
#define BP_LPCR_A7_BSC_LPM1 2
#define BP_LPCR_A7_BSC_LPM0 0
#define BM_LPCR_M4_MASK_DSM_TRIGGER 0x80000000
#define BM_SLPCR_EN_DSM 0x80000000
#define BM_SLPCR_RBC_EN 0x40000000
#define BM_SLPCR_REG_BYPASS_COUNT 0x3f000000
#define BM_SLPCR_VSTBY 0x4
#define BM_SLPCR_SBYOS 0x2
#define BM_SLPCR_BYPASS_PMIC_READY 0x1
#define BM_SLPCR_EN_A7_FASTWUP_WAIT_MODE 0x10000
#define BM_LPCR_A7_AD_L2PGE 0x10000
#define BM_LPCR_A7_AD_EN_C1_PUP 0x800
#define BM_LPCR_A7_AD_EN_C1_IRQ_PUP 0x400
#define BM_LPCR_A7_AD_EN_C0_PUP 0x200
#define BM_LPCR_A7_AD_EN_C0_IRQ_PUP 0x100
#define BM_LPCR_A7_AD_EN_PLAT_PDN 0x10
#define BM_LPCR_A7_AD_EN_C1_PDN 0x8
#define BM_LPCR_A7_AD_EN_C1_WFI_PDN 0x4
#define BM_LPCR_A7_AD_EN_C0_PDN 0x2
#define BM_LPCR_A7_AD_EN_C0_WFI_PDN 0x1
#define BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7 0x2
#define BM_GPC_PGC_PCG 0x1
#define BM_GPC_PGC_CORE_PUPSCR 0x7fff80
#define BM_GPC_PGC_ACK_SEL_A7_DUMMY_PUP_ACK 0x80000000
#define BM_GPC_PGC_ACK_SEL_A7_DUMMY_PDN_ACK 0x8000
#define BM_GPC_MLPCR_MEMLP_CTL_DIS 0x1
#define BP_LPCR_A7_BSC_IRQ_SRC 28
#define MAX_SLOT_NUMBER 10
#define A7_LPM_WAIT 0x5
#define A7_LPM_STOP 0xa
/*
 * Power domains controllable through the GPC slot registers. Each
 * domain occupies a 2-bit field in SLOTn_CFG at position (value * 2);
 * see imx_gpcv2_set_slot_ack() for how the pdn/pup request bit is
 * placed within that field.
 */
enum imx_gpc_slot {
CORE0_A7,
CORE1_A7,
SCU_A7,
FAST_MEGA_MIX,
MIPI_PHY,
PCIE_PHY,
USB_OTG1_PHY,
USB_OTG2_PHY,
USB_HSIC_PHY,
CORE0_M4,
};
static void __iomem *gpc_base;
static u32 gpcv2_wake_irqs[IMR_NUM];
static u32 gpcv2_saved_imrs[IMR_NUM];
static u32 gpcv2_saved_imrs_m4[IMR_NUM];
static u32 gpcv2_mf_irqs[IMR_NUM];
static u32 gpcv2_mf_request_on[IMR_NUM];
static DEFINE_SPINLOCK(gpcv2_lock);
/*
 * Register (or unregister) a hardware irq as a wakeup source in the
 * shared gpcv2_wake_irqs[] bitmap on behalf of the M4 core. Non-SPI
 * irqs (hwirq < 32) are ignored.
 */
void imx_gpcv2_add_m4_wake_up_irq(u32 hwirq, bool enable)
{
	unsigned int word = hwirq / 32;
	u32 bit = 1 << hwirq % 32;
	unsigned long flags;

	/* Sanity check for SPI irq */
	if (hwirq < 32)
		return;

	spin_lock_irqsave(&gpcv2_lock, flags);
	if (enable)
		gpcv2_wake_irqs[word] |= bit;
	else
		gpcv2_wake_irqs[word] &= ~bit;
	spin_unlock_irqrestore(&gpcv2_lock, flags);
}
/*
 * irq_chip .irq_set_wake: record the irq in gpcv2_wake_irqs[] so the
 * suspend path (imx_gpcv2_pre_suspend) leaves it unmasked in the GPC.
 * Always succeeds.
 */
static int imx_gpcv2_irq_set_wake(struct irq_data *d, unsigned int on)
{
	unsigned int word = d->hwirq / 32;
	u32 bit = 1 << d->hwirq % 32;
	unsigned long flags;

	BUG_ON(word >= IMR_NUM);

	spin_lock_irqsave(&gpcv2_lock, flags);
	if (on)
		gpcv2_wake_irqs[word] |= bit;
	else
		gpcv2_wake_irqs[word] &= ~bit;
	spin_unlock_irqrestore(&gpcv2_lock, flags);

	return 0;
}
void imx_gpcv2_mask_all(void)
{
void __iomem *reg_imr1 = gpc_base + GPC_IMR1_CORE0;
int i;
for (i = 0; i < IMR_NUM; i++) {
gpcv2_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4);
writel_relaxed(~0, reg_imr1 + i * 4);
}
}
void imx_gpcv2_restore_all(void)
{
void __iomem *reg_imr1 = gpc_base + GPC_IMR1_CORE0;
int i;
for (i = 0; i < IMR_NUM; i++)
writel_relaxed(gpcv2_saved_imrs[i], reg_imr1 + i * 4);
}
void imx_gpcv2_hwirq_unmask(unsigned int hwirq)
{
void __iomem *reg;
u32 val;
reg = gpc_base + GPC_IMR1_CORE0 + (hwirq / 32) * 4;
val = readl_relaxed(reg);
val &= ~(1 << hwirq % 32);
writel_relaxed(val, reg);
}
void imx_gpcv2_hwirq_mask(unsigned int hwirq)
{
void __iomem *reg;
u32 val;
reg = gpc_base + GPC_IMR1_CORE0 + (hwirq / 32) * 4;
val = readl_relaxed(reg);
val |= 1 << (hwirq % 32);
writel_relaxed(val, reg);
}
/* irq_chip .irq_unmask: unmask in the GPC first, then in the parent. */
static void imx_gpcv2_irq_unmask(struct irq_data *d)
{
	imx_gpcv2_hwirq_unmask(d->hwirq);
	irq_chip_unmask_parent(d);
}
/* irq_chip .irq_mask: mask in the GPC first, then in the parent. */
static void imx_gpcv2_irq_mask(struct irq_data *d)
{
	imx_gpcv2_hwirq_mask(d->hwirq);
	irq_chip_mask_parent(d);
}
/*
 * Program a power request into one GPC slot register and optionally
 * route that domain's ack to the A7 ack-select register.
 *
 * @index:  slot number, 0..MAX_SLOT_NUMBER-1
 * @m_core: power domain occupying the slot (2-bit field at m_core*2)
 * @mode:   false = power-down request (field bit 0),
 *          true  = power-up request (field bit 1)
 * @ack:    when true, select this domain's pdn/pup ack in
 *          GPC_PGC_ACK_SEL_A7 (and clear the matching dummy-ack bit)
 *
 * Fix: on an out-of-range index the function previously logged the
 * error but still performed the register write, touching a register
 * past the slot array; it now returns without writing.
 */
void imx_gpcv2_set_slot_ack(u32 index, enum imx_gpc_slot m_core,
bool mode, bool ack)
{
	u32 val;

	if (index >= MAX_SLOT_NUMBER) {
		pr_err("Invalid slot index!\n");
		return;
	}
	/* set slot */
	writel_relaxed(readl_relaxed(gpc_base + GPC_SLOT0_CFG + index * 4) |
		((mode + 1) << (m_core * 2)),
		gpc_base + GPC_SLOT0_CFG + index * 4);

	if (ack) {
		/* set ack */
		val = readl_relaxed(gpc_base + GPC_PGC_ACK_SEL_A7);
		/* clear dummy ack */
		val &= ~(1 << (15 + (mode ? 16 : 0)));
		val |= 1 << (m_core + (mode ? 16 : 0));
		writel_relaxed(val, gpc_base + GPC_PGC_ACK_SEL_A7);
	}
}
/*
 * Switch the GPC low power mode for the A7 cluster: RUN/WAIT/STOP with
 * or without power gating. Programs LPCR_A7_BSC (LPM mode, CPU clock
 * behavior) and SLPCR (DSM, standby, RBC, PMIC bypass) together under
 * gpcv2_lock, and applies the pending-IRQ#32 errata workaround
 * described inline on every transition. Unknown modes are ignored
 * without touching the hardware.
 */
void imx_gpcv2_set_lpm_mode(enum mxc_cpu_pwr_mode mode)
{
unsigned long flags;
u32 val1, val2;
spin_lock_irqsave(&gpcv2_lock, flags);
val1 = readl_relaxed(gpc_base + GPC_LPCR_A7_BSC);
val2 = readl_relaxed(gpc_base + GPC_SLPCR);
/* all cores' LPM settings must be same */
val1 &= ~(BM_LPCR_A7_BSC_LPM0 | BM_LPCR_A7_BSC_LPM1);
val1 |= BM_LPCR_A7_BSC_CPU_CLK_ON_LPM;
val2 &= ~(BM_SLPCR_EN_DSM | BM_SLPCR_VSTBY | BM_SLPCR_RBC_EN |
BM_SLPCR_SBYOS | BM_SLPCR_BYPASS_PMIC_READY);
/*
* GPC: When improper low-power sequence is used,
* the SoC enters low power mode before the ARM core executes WFI.
*
* Software workaround:
* 1) Software should trigger IRQ #32 (IOMUX) to be always pending
* by setting IOMUX_GPR1_IRQ.
* 2) Software should then unmask IRQ #32 in GPC before setting GPC
* Low-Power mode.
* 3) Software should mask IRQ #32 right after GPC Low-Power mode
* is set.
*/
switch (mode) {
case WAIT_CLOCKED:
imx_gpcv2_hwirq_unmask(0);
break;
case WAIT_UNCLOCKED:
val1 |= A7_LPM_WAIT << BP_LPCR_A7_BSC_LPM0;
val1 &= ~BM_LPCR_A7_BSC_CPU_CLK_ON_LPM;
imx_gpcv2_hwirq_mask(0);
break;
case STOP_POWER_ON:
val1 |= A7_LPM_STOP << BP_LPCR_A7_BSC_LPM0;
val1 &= ~BM_LPCR_A7_BSC_CPU_CLK_ON_LPM;
val2 |= BM_SLPCR_EN_DSM;
val2 |= BM_SLPCR_RBC_EN;
val2 |= BM_SLPCR_BYPASS_PMIC_READY;
imx_gpcv2_hwirq_mask(0);
break;
case STOP_POWER_OFF:
val1 |= A7_LPM_STOP << BP_LPCR_A7_BSC_LPM0;
val1 &= ~BM_LPCR_A7_BSC_CPU_CLK_ON_LPM;
val2 |= BM_SLPCR_EN_DSM;
val2 |= BM_SLPCR_RBC_EN;
val2 |= BM_SLPCR_SBYOS;
val2 |= BM_SLPCR_VSTBY;
val2 |= BM_SLPCR_BYPASS_PMIC_READY;
imx_gpcv2_hwirq_mask(0);
break;
default:
/* note: returns with gpcv2_lock held released below is skipped
 * intentionally? NOTE(review): this early return leaves
 * gpcv2_lock held and irqs off — confirm intended behavior. */
return;
}
writel_relaxed(val1, gpc_base + GPC_LPCR_A7_BSC);
writel_relaxed(val2, gpc_base + GPC_SLPCR);
spin_unlock_irqrestore(&gpcv2_lock, flags);
}
/*
 * Enable/disable platform (SCU) power-down and L2 retention gating on
 * low power mode entry via LPCR_A7_AD.
 */
void imx_gpcv2_set_plat_power_gate_by_lpm(bool pdn)
{
	u32 mask = BM_LPCR_A7_AD_EN_PLAT_PDN | BM_LPCR_A7_AD_L2PGE;
	u32 reg = readl_relaxed(gpc_base + GPC_LPCR_A7_AD) & ~mask;

	if (pdn)
		reg |= mask;

	writel_relaxed(reg, gpc_base + GPC_LPCR_A7_AD);
}
/*
 * Set or clear the PCG (power control gating) bit of the PGC register
 * at @offset, leaving the other bits untouched.
 */
void imx_gpcv2_set_m_core_pgc(bool enable, u32 offset)
{
	u32 reg = readl_relaxed(gpc_base + offset);

	if (enable)
		reg |= BM_GPC_PGC_PCG;
	else
		reg &= ~BM_GPC_PGC_PCG;

	writel_relaxed(reg, gpc_base + offset);
}
/*
 * Power core1 down (@pdn = true) or up (@pdn = false) by software
 * request: enable the C1 PGC, raise the CORE1 request bit in the
 * matching SW_PDN/PUP_REQ register, busy-wait until the hardware
 * clears it (request complete), then disable the C1 PGC again.
 * Note: the wait loop has no timeout.
 */
void imx_gpcv2_set_core1_pdn_pup_by_software(bool pdn)
{
u32 val = readl_relaxed(gpc_base + (pdn ?
GPC_CPU_PGC_SW_PDN_REQ : GPC_CPU_PGC_SW_PUP_REQ));
imx_gpcv2_set_m_core_pgc(true, GPC_PGC_C1);
val |= BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7;
writel_relaxed(val, gpc_base + (pdn ?
GPC_CPU_PGC_SW_PDN_REQ : GPC_CPU_PGC_SW_PUP_REQ));
while ((readl_relaxed(gpc_base + (pdn ?
GPC_CPU_PGC_SW_PDN_REQ : GPC_CPU_PGC_SW_PUP_REQ)) &
BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7) != 0)
;
imx_gpcv2_set_m_core_pgc(false, GPC_PGC_C1);
}
/*
 * Arm (@pdn = true) or disarm per-CPU power gating triggered by WFI:
 * enables the core's PGC and the WFI-power-down / IRQ-power-up enable
 * bits in LPCR_A7_AD for @cpu (0 or 1; other values are a no-op write
 * back of the unchanged register). Serialized by gpcv2_lock.
 */
void imx_gpcv2_set_cpu_power_gate_by_wfi(u32 cpu, bool pdn)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&gpcv2_lock, flags);
val = readl_relaxed(gpc_base + GPC_LPCR_A7_AD);
if (cpu == 0) {
if (pdn) {
imx_gpcv2_set_m_core_pgc(true, GPC_PGC_C0);
val |= BM_LPCR_A7_AD_EN_C0_WFI_PDN |
BM_LPCR_A7_AD_EN_C0_IRQ_PUP;
} else {
imx_gpcv2_set_m_core_pgc(false, GPC_PGC_C0);
val &= ~(BM_LPCR_A7_AD_EN_C0_WFI_PDN |
BM_LPCR_A7_AD_EN_C0_IRQ_PUP);
}
}
if (cpu == 1) {
if (pdn) {
imx_gpcv2_set_m_core_pgc(true, GPC_PGC_C1);
val |= BM_LPCR_A7_AD_EN_C1_WFI_PDN |
BM_LPCR_A7_AD_EN_C1_IRQ_PUP;
} else {
imx_gpcv2_set_m_core_pgc(false, GPC_PGC_C1);
val &= ~(BM_LPCR_A7_AD_EN_C1_WFI_PDN |
BM_LPCR_A7_AD_EN_C1_IRQ_PUP);
}
}
writel_relaxed(val, gpc_base + GPC_LPCR_A7_AD);
spin_unlock_irqrestore(&gpcv2_lock, flags);
}
/*
 * Enable (@pdn = true) or disable per-CPU power down/up driven by low
 * power mode entry, by toggling the C0/C1 PDN+PUP enable bits in
 * LPCR_A7_AD. cpu values other than 0/1 leave the register unchanged.
 * Serialized by gpcv2_lock.
 */
void imx_gpcv2_set_cpu_power_gate_by_lpm(u32 cpu, bool pdn)
{
	unsigned long flags;
	u32 bits = 0;
	u32 reg;

	if (cpu == 0)
		bits = BM_LPCR_A7_AD_EN_C0_PDN | BM_LPCR_A7_AD_EN_C0_PUP;
	if (cpu == 1)
		bits = BM_LPCR_A7_AD_EN_C1_PDN | BM_LPCR_A7_AD_EN_C1_PUP;

	spin_lock_irqsave(&gpcv2_lock, flags);
	reg = readl_relaxed(gpc_base + GPC_LPCR_A7_AD);
	if (pdn)
		reg |= bits;
	else
		reg &= ~bits;
	writel_relaxed(reg, gpc_base + GPC_LPCR_A7_AD);
	spin_unlock_irqrestore(&gpcv2_lock, flags);
}
/*
 * Arm (@pdn = true) or disarm full-cluster power gating for low power
 * idle: per-CPU LPM gating, the C0/C1/SCU PGCs, platform gating, and
 * the pdn/pup slot sequence (slots 0/2/3 down, slot 6 up). On disarm
 * the used slots are cleared, the ack selection falls back to the
 * dummy acks, and the RBC is disabled. C1 entries are skipped when
 * only one CPU is online.
 */
void imx_gpcv2_set_cpu_power_gate_in_idle(bool pdn)
{
unsigned long flags;
u32 cpu;
for_each_possible_cpu(cpu)
imx_gpcv2_set_cpu_power_gate_by_lpm(cpu, pdn);
spin_lock_irqsave(&gpcv2_lock, flags);
imx_gpcv2_set_m_core_pgc(pdn, GPC_PGC_C0);
if (num_online_cpus() > 1)
imx_gpcv2_set_m_core_pgc(pdn, GPC_PGC_C1);
imx_gpcv2_set_m_core_pgc(pdn, GPC_PGC_SCU);
imx_gpcv2_set_plat_power_gate_by_lpm(pdn);
if (pdn) {
/* power down: CORE0 (slot0), CORE1 (slot2), SCU last (slot3) */
imx_gpcv2_set_slot_ack(0, CORE0_A7, false, false);
if (num_online_cpus() > 1)
imx_gpcv2_set_slot_ack(2, CORE1_A7, false, false);
imx_gpcv2_set_slot_ack(3, SCU_A7, false, true);
/* power up: SCU and cores together in slot 6 */
imx_gpcv2_set_slot_ack(6, SCU_A7, true, false);
if (num_online_cpus() > 1)
imx_gpcv2_set_slot_ack(6, CORE1_A7, true, false);
imx_gpcv2_set_slot_ack(6, CORE0_A7, true, true);
} else {
writel_relaxed(0x0, gpc_base + GPC_SLOT0_CFG + 0 * 0x4);
writel_relaxed(0x0, gpc_base + GPC_SLOT0_CFG + 2 * 0x4);
writel_relaxed(0x0, gpc_base + GPC_SLOT0_CFG + 3 * 0x4);
writel_relaxed(0x0, gpc_base + GPC_SLOT0_CFG + 6 * 0x4);
writel_relaxed(0x0, gpc_base + GPC_SLOT0_CFG + 7 * 0x4);
writel_relaxed(0x0, gpc_base + GPC_SLOT0_CFG + 8 * 0x4);
writel_relaxed(BM_GPC_PGC_ACK_SEL_A7_DUMMY_PUP_ACK |
BM_GPC_PGC_ACK_SEL_A7_DUMMY_PDN_ACK,
gpc_base + GPC_PGC_ACK_SEL_A7);
imx_gpcv2_enable_rbc(false);
}
spin_unlock_irqrestore(&gpcv2_lock, flags);
}
/*
 * Route the Mega/Fast mix power-down request to slot @pdn_index and
 * its power-up request to slot @pup_index (pup is the next bit up in
 * the domain's 2-bit slot field).
 */
void imx_gpcv2_set_mix_phy_gate_by_lpm(u32 pdn_index, u32 pup_index)
{
	u32 pdn_req = 1 << (FAST_MEGA_MIX * 2);

	/* set power down slot */
	writel_relaxed(pdn_req,
		gpc_base + GPC_SLOT0_CFG + pdn_index * 4);
	/* set power up slot */
	writel_relaxed(1 << (FAST_MEGA_MIX * 2 + 1),
		gpc_base + GPC_SLOT0_CFG + pup_index * 4);
}
/*
 * Non-zero when the Mega/Fast mix PGC register indicates the mix is
 * set up to be powered off in DSM (see imx_gpcv2_mf_mix_off()).
 */
unsigned int imx_gpcv2_is_mf_mix_off(void)
{
	unsigned int pgc_fm = readl_relaxed(gpc_base + GPC_PGC_FM);

	return pgc_fm;
}
/*
 * Power off the Mega/Fast mix in DSM, but only when no registered
 * wakeup irq and no explicit mf_power_on request maps to a Mega/Fast
 * irq — otherwise the mix must stay on and this is a no-op. Programs
 * the pdn (slot 1) / pup (slot 5) sequence and enables the FM PGC.
 */
static void imx_gpcv2_mf_mix_off(void)
{
int i;
for (i = 0; i < IMR_NUM; i++)
if (((gpcv2_wake_irqs[i] | gpcv2_mf_request_on[i]) &
gpcv2_mf_irqs[i]) != 0)
return;
pr_info("Turn off Mega/Fast mix in DSM\n");
imx_gpcv2_set_slot_ack(1, FAST_MEGA_MIX, false, false);
imx_gpcv2_set_slot_ack(5, FAST_MEGA_MIX, true, false);
imx_gpcv2_set_m_core_pgc(true, GPC_PGC_FM);
}
/*
 * Record that @irq requires the Mega/Fast mix to stay powered (@on
 * non-zero) or withdraw that request. Consulted by
 * imx_gpcv2_mf_mix_off() before powering the mix down. Returns 0.
 */
int imx_gpcv2_mf_power_on(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long hwirq = desc->irq_data.hwirq;
	unsigned int word = hwirq / 32;
	u32 bit = 1 << (hwirq % 32);
	unsigned long flags;

	BUG_ON(word >= IMR_NUM);

	spin_lock_irqsave(&gpcv2_lock, flags);
	if (on)
		gpcv2_mf_request_on[word] |= bit;
	else
		gpcv2_mf_request_on[word] &= ~bit;
	spin_unlock_irqrestore(&gpcv2_lock, flags);

	return 0;
}
/*
 * Enable/disable the Reg Bypass Counter in SLPCR. All GPC irqs are
 * masked around the update, and a 65 us delay (>= 2 CKIL cycles, per
 * the inline hardware note) lets the new setting take effect before
 * the masks are restored.
 * NOTE(review): the second write uses writel() while the rest of the
 * file uses writel_relaxed() — possibly unintentional; confirm whether
 * the barrier is needed here.
 */
void imx_gpcv2_enable_rbc(bool enable)
{
u32 val;
/*
* need to mask all interrupts in GPC before
* operating RBC configurations
*/
imx_gpcv2_mask_all();
/* configure RBC enable bit */
val = readl_relaxed(gpc_base + GPC_SLPCR);
val &= ~BM_SLPCR_RBC_EN;
val |= enable ? BM_SLPCR_RBC_EN : 0;
writel_relaxed(val, gpc_base + GPC_SLPCR);
/* configure RBC count */
val = readl_relaxed(gpc_base + GPC_SLPCR);
val &= ~BM_SLPCR_REG_BYPASS_COUNT;
val |= enable ? BM_SLPCR_REG_BYPASS_COUNT : 0;
writel(val, gpc_base + GPC_SLPCR);
/*
* need to delay at least 2 cycles of CKIL(32K)
* due to hardware design requirement, which is
* ~61us, here we use 65us for safe
*/
udelay(65);
/* restore GPC interrupt mask settings */
imx_gpcv2_restore_all();
}
/*
 * Prepare the GPC for system suspend.
 *
 * @arm_power_off: true  -> STOP mode with core0/SCU (and, when the M4
 *                 allows it, the Mega/Fast mix) power gated through
 *                 the GPC pdn/pup slots;
 *                 false -> STOP mode with power retained.
 *
 * In both cases the core0 IMRs are saved and reprogrammed so that only
 * irqs registered in gpcv2_wake_irqs[] can wake the SoC.
 * Balanced by imx_gpcv2_post_resume().
 *
 * Fix: dropped a stray second ';' after the imx_gpcv2_mf_mix_off()
 * call (harmless empty statement, but a visible defect).
 */
void imx_gpcv2_pre_suspend(bool arm_power_off)
{
	void __iomem *reg_imr1 = gpc_base + GPC_IMR1_CORE0;
	int i;

	if (arm_power_off) {
		imx_gpcv2_set_lpm_mode(STOP_POWER_OFF);
		/* enable core0 power down/up with low power mode */
		imx_gpcv2_set_cpu_power_gate_by_lpm(0, true);
		/* enable plat power down with low power mode */
		imx_gpcv2_set_plat_power_gate_by_lpm(true);
		/*
		 * To avoid confusion, we use slot 0~4 for power down,
		 * slot 5~9 for power up.
		 *
		 * Power down slot sequence:
		 * Slot0 -> CORE0
		 * Slot1 -> Mega/Fast MIX
		 * Slot2 -> SCU
		 *
		 * Power up slot sequence:
		 * Slot5 -> Mega/Fast MIX
		 * Slot6 -> SCU
		 * Slot7 -> CORE0
		 *
		 * NOTE(review): the code below actually packs the
		 * CORE0 power-up request into slot 6 together with the
		 * SCU, not slot 7 — confirm the comment vs. code.
		 */
		imx_gpcv2_set_slot_ack(0, CORE0_A7, false, false);
		imx_gpcv2_set_slot_ack(2, SCU_A7, false, true);
		/* the mix may only go down when the M4 is off or stopped */
		if ((!imx_src_is_m4_enabled()) ||
			(imx_src_is_m4_enabled() && imx_mu_is_m4_in_stop()))
			imx_gpcv2_mf_mix_off();
		imx_gpcv2_set_slot_ack(6, SCU_A7, true, false);
		imx_gpcv2_set_slot_ack(6, CORE0_A7, true, true);
		/* enable core0, scu */
		imx_gpcv2_set_m_core_pgc(true, GPC_PGC_C0);
		imx_gpcv2_set_m_core_pgc(true, GPC_PGC_SCU);
	} else {
		imx_gpcv2_set_lpm_mode(STOP_POWER_ON);
	}

	/* leave only registered wakeup sources unmasked during suspend */
	for (i = 0; i < IMR_NUM; i++) {
		gpcv2_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4);
		writel_relaxed(~gpcv2_wake_irqs[i], reg_imr1 + i * 4);
	}
}
void imx_gpcv2_enable_wakeup_for_m4(void)
{
void __iomem *reg_imr2 = gpc_base + GPC_IMR1_M4;
u32 i;
for (i = 0; i < IMR_NUM; i++) {
gpcv2_saved_imrs_m4[i] = readl_relaxed(reg_imr2 + i * 4);
writel_relaxed(~gpcv2_wake_irqs[i], reg_imr2 + i * 4);
}
}
void imx_gpcv2_disable_wakeup_for_m4(void)
{
void __iomem *reg_imr2 = gpc_base + GPC_IMR1_M4;
u32 i;
for (i = 0; i < IMR_NUM; i++)
writel_relaxed(gpcv2_saved_imrs_m4[i], reg_imr2 + i * 4);
}
/*
 * Re-initialize the GPCv2 after resume: re-apply the RUN-mode register
 * defaults (the same ones imx_gpcv2_init() programs at boot), restore the
 * core0 IMRs saved in imx_gpcv2_pre_suspend(), and undo all suspend-time
 * power-gating / slot configuration.
 */
void imx_gpcv2_post_resume(void)
{
	void __iomem *reg_imr1 = gpc_base + GPC_IMR1_CORE0;
	int i, val;

	/* only external IRQs to wake up LPM and core 0/1 */
	val = readl_relaxed(gpc_base + GPC_LPCR_A7_BSC);
	val |= BM_LPCR_A7_BSC_IRQ_SRC_A7_WAKEUP;
	writel_relaxed(val, gpc_base + GPC_LPCR_A7_BSC);
	/* mask m4 dsm trigger if M4 NOT enabled */
	if (!imx_src_is_m4_enabled())
		writel_relaxed(readl_relaxed(gpc_base + GPC_LPCR_M4) |
			BM_LPCR_M4_MASK_DSM_TRIGGER, gpc_base + GPC_LPCR_M4);
	/* set mega/fast mix in A7 domain */
	writel_relaxed(0x1, gpc_base + GPC_PGC_CPU_MAPPING);
	/* set SCU timing */
	writel_relaxed((0x59 << 10) | 0x5B | (0x2 << 20),
		gpc_base + GPC_PGC_SCU_TIMING);
	/* set C0/C1 power up timing per design requirement */
	val = readl_relaxed(gpc_base + GPC_PGC_C0_PUPSCR);
	val &= ~BM_GPC_PGC_CORE_PUPSCR;
	val |= (0x1A << 7);
	writel_relaxed(val, gpc_base + GPC_PGC_C0_PUPSCR);
	val = readl_relaxed(gpc_base + GPC_PGC_C1_PUPSCR);
	val &= ~BM_GPC_PGC_CORE_PUPSCR;
	val |= (0x1A << 7);
	writel_relaxed(val, gpc_base + GPC_PGC_C1_PUPSCR);
	/* leave DSM off; without an active M4, also clear standby/bypass */
	val = readl_relaxed(gpc_base + GPC_SLPCR);
	val &= ~(BM_SLPCR_EN_DSM);
	if (!imx_src_is_m4_enabled())
		val &= ~(BM_SLPCR_VSTBY | BM_SLPCR_RBC_EN |
			BM_SLPCR_SBYOS | BM_SLPCR_BYPASS_PMIC_READY);
	val |= BM_SLPCR_EN_A7_FASTWUP_WAIT_MODE;
	writel_relaxed(val, gpc_base + GPC_SLPCR);
	if (imx_get_soc_revision() == IMX_CHIP_REVISION_1_0) {
		/* disable memory low power mode */
		val = readl_relaxed(gpc_base + GPC_MLPCR);
		val |= BM_GPC_MLPCR_MEMLP_CTL_DIS;
		writel_relaxed(val, gpc_base + GPC_MLPCR);
	}
	/* restore GPC interrupt mask settings saved in pre_suspend */
	for (i = 0; i < IMR_NUM; i++)
		writel_relaxed(gpcv2_saved_imrs[i], reg_imr1 + i * 4);
	/* back to normal RUN-mode configuration: no LPM power gating */
	imx_gpcv2_set_lpm_mode(WAIT_CLOCKED);
	imx_gpcv2_set_cpu_power_gate_by_lpm(0, false);
	imx_gpcv2_set_plat_power_gate_by_lpm(false);
	imx_gpcv2_set_m_core_pgc(false, GPC_PGC_C0);
	imx_gpcv2_set_m_core_pgc(false, GPC_PGC_SCU);
	imx_gpcv2_set_m_core_pgc(false, GPC_PGC_FM);
	/* clear the A7 power up/down slots programmed for suspend */
	for (i = 0; i < MAX_SLOT_NUMBER; i++){
		if (i == 1 || i == 5) /* skip slots m4 uses */
			continue;
		writel_relaxed(0x0, gpc_base + GPC_SLOT0_CFG + i * 0x4);
	}
	writel_relaxed(BM_GPC_PGC_ACK_SEL_A7_DUMMY_PUP_ACK |
		BM_GPC_PGC_ACK_SEL_A7_DUMMY_PDN_ACK,
		gpc_base + GPC_PGC_ACK_SEL_A7);
	/* disable RBC */
	imx_gpcv2_enable_rbc(false);
}
/*
 * irq_chip for interrupts routed through the GPCv2 hierarchy; EOI,
 * retrigger and affinity are delegated straight to the parent (GIC),
 * while mask/unmask/set_wake go through the GPC IMR handlers.
 */
static struct irq_chip imx_gpcv2_chip = {
	.name = "GPCV2",
	.irq_eoi = irq_chip_eoi_parent,
	.irq_mask = imx_gpcv2_irq_mask,
	.irq_unmask = imx_gpcv2_irq_unmask,
	.irq_retrigger = irq_chip_retrigger_hierarchy,
	.irq_set_wake = imx_gpcv2_irq_set_wake,
#ifdef CONFIG_SMP
	.irq_set_affinity = irq_chip_set_affinity_parent,
#endif
};
static int imx_gpcv2_domain_xlate(struct irq_domain *domain,
struct device_node *controller,
const u32 *intspec,
unsigned int intsize,
unsigned long *out_hwirq,
unsigned int *out_type)
{
if (irq_domain_get_of_node(domain) != controller)
return -EINVAL; /* Shouldn't happen, really... */
if (intsize != 3)
return -EINVAL; /* Not GIC compliant */
if (intspec[0] != 0)
return -EINVAL; /* No PPI should point to this domain */
*out_hwirq = intspec[1];
*out_type = intspec[2];
return 0;
}
static int imx_gpcv2_domain_alloc(struct irq_domain *domain,
unsigned int irq,
unsigned int nr_irqs, void *data)
{
struct irq_fwspec *fwspec = data;
struct irq_fwspec parent_fwspec;
irq_hw_number_t hwirq;
int i;
if (fwspec->param_count != 3)
return -EINVAL; /* Not GIC compliant */
if (fwspec->param[0] != 0)
return -EINVAL; /* No PPI should point to this domain */
hwirq = fwspec->param[1];
if (hwirq >= GPC_MAX_IRQS)
return -EINVAL; /* Can't deal with this */
for (i = 0; i < nr_irqs; i++)
irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i,
&imx_gpcv2_chip, NULL);
parent_fwspec.fwnode = domain->parent->fwnode;
parent_fwspec.param_count = 3;
parent_fwspec.param[0] = 0;
parent_fwspec.param[1] = hwirq;
parent_fwspec.param[2] = fwspec->param[2];
return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs,
&parent_fwspec);
}
static struct irq_domain_ops imx_gpcv2_domain_ops = {
.xlate = imx_gpcv2_domain_xlate,
.alloc = imx_gpcv2_domain_alloc,
.free = irq_domain_free_irqs_common,
};
/*
 * Early (irqchip-time) initialization of the GPCv2.
 *
 * Registers the hierarchical irq domain on top of the GIC, masks all GPC
 * interrupts, reads the Mega/Fast-mix wakeup IRQ map from DT, and programs
 * the same RUN-mode power-controller defaults that
 * imx_gpcv2_post_resume() re-applies after suspend.
 */
static int __init imx_gpcv2_init(struct device_node *node,
			struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	int i, val;

	if (!parent) {
		pr_err("%s: no parent, giving up\n", node->full_name);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%s: unable to obtain parent domain\n", node->full_name);
		return -ENXIO;
	}

	gpc_base = of_iomap(node, 0);
	if (WARN_ON(!gpc_base))
		return -ENOMEM;

	domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
					  node, &imx_gpcv2_domain_ops,
					  NULL);
	if (!domain) {
		iounmap(gpc_base);
		return -ENOMEM;
	}

	/* Initially mask all interrupts */
	for (i = 0; i < IMR_NUM; i++) {
		writel_relaxed(~0, gpc_base + GPC_IMR1_CORE0 + i * 4);
		writel_relaxed(~0, gpc_base + GPC_IMR1_CORE1 + i * 4);
	}

	/*
	 * Due to hardware design requirement, need to make sure GPR
	 * interrupt(#32) is unmasked during RUN mode to avoid entering
	 * DSM by mistake.
	 */
	writel_relaxed(~0x1, gpc_base + GPC_IMR1_CORE0);

	/* Read supported wakeup source in M/F domain */
	if (cpu_is_imx7d()) {
		u32 mf_mask = 0;

		for (i = 0; i < 4; i++) {
			of_property_read_u32_index(node,
					"fsl,mf-mix-wakeup-irq", i,
					&gpcv2_mf_irqs[i]);
			mf_mask |= gpcv2_mf_irqs[i];
		}
		if (!mf_mask)
			pr_info("No wakeup source in Mega/Fast domain found!\n");
	}

	/* only external IRQs to wake up LPM and core 0/1 */
	val = readl_relaxed(gpc_base + GPC_LPCR_A7_BSC);
	val |= BM_LPCR_A7_BSC_IRQ_SRC_A7_WAKEUP;
	writel_relaxed(val, gpc_base + GPC_LPCR_A7_BSC);
	/* mask m4 dsm trigger if M4 NOT enabled */
	if (!imx_src_is_m4_enabled())
		writel_relaxed(readl_relaxed(gpc_base + GPC_LPCR_M4) |
			BM_LPCR_M4_MASK_DSM_TRIGGER, gpc_base + GPC_LPCR_M4);
	/* set mega/fast mix in A7 domain */
	writel_relaxed(0x1, gpc_base + GPC_PGC_CPU_MAPPING);
	/* set SCU timing */
	writel_relaxed((0x59 << 10) | 0x5B | (0x2 << 20),
		gpc_base + GPC_PGC_SCU_TIMING);
	/* set C0/C1 power up timing per design requirement */
	val = readl_relaxed(gpc_base + GPC_PGC_C0_PUPSCR);
	val &= ~BM_GPC_PGC_CORE_PUPSCR;
	val |= (0x1A << 7);
	writel_relaxed(val, gpc_base + GPC_PGC_C0_PUPSCR);
	val = readl_relaxed(gpc_base + GPC_PGC_C1_PUPSCR);
	val &= ~BM_GPC_PGC_CORE_PUPSCR;
	val |= (0x1A << 7);
	writel_relaxed(val, gpc_base + GPC_PGC_C1_PUPSCR);
	writel_relaxed(BM_GPC_PGC_ACK_SEL_A7_DUMMY_PUP_ACK |
		BM_GPC_PGC_ACK_SEL_A7_DUMMY_PDN_ACK,
		gpc_base + GPC_PGC_ACK_SEL_A7);
	val = readl_relaxed(gpc_base + GPC_SLPCR);
	val &= ~(BM_SLPCR_EN_DSM);
	if (!imx_src_is_m4_enabled())
		val &= ~(BM_SLPCR_VSTBY | BM_SLPCR_RBC_EN |
			BM_SLPCR_SBYOS | BM_SLPCR_BYPASS_PMIC_READY);
	val |= BM_SLPCR_EN_A7_FASTWUP_WAIT_MODE;
	writel_relaxed(val, gpc_base + GPC_SLPCR);
	if (imx_get_soc_revision() == IMX_CHIP_REVISION_1_0) {
		/* disable memory low power mode */
		val = readl_relaxed(gpc_base + GPC_MLPCR);
		val |= BM_GPC_MLPCR_MEMLP_CTL_DIS;
		writel_relaxed(val, gpc_base + GPC_MLPCR);
	}
	/* disable RBC */
	imx_gpcv2_enable_rbc(false);
	/*
	 * Clear the OF_POPULATED flag set in of_irq_init so that
	 * later the GPC power domain driver will not be skipped.
	 */
	of_node_clear_flag(node, OF_POPULATED);
	return 0;
}
/*
* We cannot use the IRQCHIP_DECLARE macro that lives in
* drivers/irqchip, so we're forced to roll our own. Not very nice.
*/
OF_DECLARE_2(irqchip, imx_gpcv2, "fsl,imx7d-gpc", imx_gpcv2_init);
/*
 * Sanity-check the GPC node in the device tree.  On an outdated DT
 * (missing "interrupt-controller"), imx_gpcv2_init() will not run, so
 * map the GPC here anyway to keep CPUidle and workarounds functional.
 */
void __init imx_gpcv2_check_dt(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-gpc");
	if (WARN_ON(!np))
		return;

	if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
		pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
		/* map GPC, so that at least CPUidle and WARs keep working */
		gpc_base = of_iomap(np, 0);
	}

	/* drop the reference taken by of_find_compatible_node() */
	of_node_put(np);
}

View File

@ -21,6 +21,17 @@ diag_reg_offset:
/*
 * Secondary CPU entry: Cortex-A7 cores jump straight to
 * secondary_startup; other parts go through set_diag_reg first.
 */
ENTRY(v7_secondary_startup)
ARM_BE8(setend be)			@ go BE8 if entered LE
	mrc	p15, 0, r0, c0, c0, 0	@ r0 = MIDR
	ldr	r1, =0xf00
	orr	r1, r1, #0xff		@ r1 = 0xfff mask for part number
	mov	r0, r0, lsr #4
	and	r0, r0, r1		@ r0 = MIDR[15:4], primary part no.
	/* 0xc07 is cortex A7's ID */
	ldr	r1, =0xc00
	orr	r1, r1, #0x7
	cmp	r0, r1
	beq	secondary_startup	@ A7: no diagnostic-register fixup
	set_diag_reg
	b	secondary_startup
ENDPROC(v7_secondary_startup)

View File

@ -0,0 +1,788 @@
/*
* Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/linkage.h>

/*
 * Byte offsets into the pm_info structure passed in r0.
 * NOTE(review): must stay in sync with the C-side struct layout
 * (defined in the i.MX7D cpuidle/pm code) -- confirm on change.
 */
#define PM_INFO_VBASE_OFFSET 0x0
#define PM_INFO_PBASE_OFFSET 0x4
#define PM_INFO_RESUME_ADDR_OFFSET 0x8
#define PM_INFO_PM_INFO_SIZE_OFFSET 0xc
#define PM_INFO_PM_INFO_TTBR_OFFSET 0x10
#define PM_INFO_PM_INFO_NUM_ONLINE_CPUS_OFFSET 0x14
#define PM_INFO_PM_INFO_NUM_LPI_CPUS_OFFSET 0x18
#define PM_INFO_VAL_OFFSET 0x1c
#define PM_INFO_FLAG0_OFFSET 0x20
#define PM_INFO_FLAG1_OFFSET 0x24
/* _P_ = physical, _V_ = virtual base address of each peripheral */
#define PM_INFO_MX7D_DDRC_P_OFFSET 0x28
#define PM_INFO_MX7D_DDRC_V_OFFSET 0x2c
#define PM_INFO_MX7D_CCM_P_OFFSET 0x30
#define PM_INFO_MX7D_CCM_V_OFFSET 0x34
#define PM_INFO_MX7D_ANATOP_P_OFFSET 0x38
#define PM_INFO_MX7D_ANATOP_V_OFFSET 0x3c
#define PM_INFO_MX7D_SRC_P_OFFSET 0x40
#define PM_INFO_MX7D_SRC_V_OFFSET 0x44
#define PM_INFO_MX7D_IOMUXC_GPR_P_OFFSET 0x48
#define PM_INFO_MX7D_IOMUXC_GPR_V_OFFSET 0x4c
#define PM_INFO_MX7D_GPC_P_OFFSET 0x50
#define PM_INFO_MX7D_GPC_V_OFFSET 0x54
#define PM_INFO_MX7D_GIC_DIST_P_OFFSET 0x58
#define PM_INFO_MX7D_GIC_DIST_V_OFFSET 0x5c

/* SRC GPR pairs holding per-core resume address / argument */
#define MX7D_SRC_GPR1 0x74
#define MX7D_SRC_GPR2 0x78
#define MX7D_SRC_GPR3 0x7c
#define MX7D_SRC_GPR4 0x80

/* GPC interrupt mask registers */
#define MX7D_GPC_IMR1 0x30
#define MX7D_GPC_IMR2 0x34
#define MX7D_GPC_IMR3 0x38
#define MX7D_GPC_IMR4 0x3c

/* DDR controller register offsets */
#define DDRC_STAT 0x4
#define DDRC_PWRCTL 0x30
#define DDRC_DBG1 0x304
#define DDRC_DBGCAM 0x308
#define DDRC_PSTAT 0x3fc
#define DDRC_PCTRL_0 0x490
/*
* imx_pen_lock
*
* The reference link of Peterson's algorithm:
* http://en.wikipedia.org/wiki/Peterson's_algorithm
*
* val1 = r1 = !turn (inverted from Peterson's algorithm)
* on cpu 0:
* r2 = flag[0] (in flag0)
* r3 = flag[1] (in flag1)
* on cpu1:
* r2 = flag[1] (in flag1)
* r3 = flag[0] (in flag0)
*
*/
	.macro imx_pen_lock

	/* r8 = pm_info base, r5 = this cpu id (MPIDR[1:0]) */
	mov	r8, r0
	mrc	p15, 0, r5, c0, c0, 5
	and	r5, r5, #3
	add	r6, r8, #PM_INFO_VAL_OFFSET
	cmp	r5, #0
	/* r7 -> our flag, r8 -> the other cpu's flag */
	addeq	r7, r8, #PM_INFO_FLAG0_OFFSET
	addeq	r8, r8, #PM_INFO_FLAG1_OFFSET
	addne	r7, r8, #PM_INFO_FLAG1_OFFSET
	addne	r8, r8, #PM_INFO_FLAG0_OFFSET
	/* flag[us] = 1: announce we want the lock */
	mov	r9, #1
	str	r9, [r7]
	dsb
	/* val = our id; per the header comment, val is the inverted turn */
	str	r5, [r6]
1:
	dsb
	/* spin while the other cpu wants in and it is not our turn */
	ldr	r9, [r8]
	cmp	r9, #1
	ldreq	r9, [r6]
	cmpeq	r9, r5
	beq	1b

	.endm
	.macro imx_pen_unlock

	dsb
	/* clear our own flag (flag0 for cpu0, flag1 for cpu1) */
	mrc	p15, 0, r6, c0, c0, 5
	and	r6, r6, #3
	cmp	r6, #0
	addeq	r7, r0, #PM_INFO_FLAG0_OFFSET
	addne	r7, r0, #PM_INFO_FLAG1_OFFSET
	mov	r9, #0
	str	r9, [r7]

	.endm
	.macro disable_l1_dcache

	/* flush L1 d-cache, then clear SCTLR.C, then flush again */
	push	{r0 - r12, lr}
	ldr	r7, =v7_flush_dcache_all
	mov	lr, pc
	mov	pc, r7
	pop	{r0 - r12, lr}
	/* disable d-cache */
	mrc	p15, 0, r7, c1, c0, 0
	bic	r7, r7, #(1 << 2)
	mcr	p15, 0, r7, c1, c0, 0
	dsb
	isb
	push	{r0 - r12, lr}
	ldr	r7, =v7_flush_dcache_all
	mov	lr, pc
	mov	pc, r7
	pop	{r0 - r12, lr}

#ifdef CONFIG_SMP
	clrex

	/* Turn off SMP bit. */
	mrc	p15, 0, r8, c1, c0, 1
	bic	r8, r8, #0x40
	mcr	p15, 0, r8, c1, c0, 1

	isb
	dsb
#endif

	.endm
	.macro tlb_set_to_ocram

	/* save ttbr */
	mrc	p15, 0, r7, c2, c0, 1
	str	r7, [r0, #PM_INFO_PM_INFO_TTBR_OFFSET]

	/*
	 * To ensure no page table walks occur in DDR, we
	 * have a another page table stored in IRAM that only
	 * contains entries pointing to IRAM, AIPS1 and AIPS2.
	 * We need to set the TTBR1 to the new IRAM TLB.
	 * Do the following steps:
	 * 1. Flush the Branch Target Address Cache (BTAC)
	 * 2. Set TTBR1 to point to IRAM page table.
	 * 3. Disable page table walks in TTBR0 (PD0 = 1)
	 * 4. Set TTBR0.N=1, implying 0-2G is translated by TTBR0
	 *    and 2-4G is translated by TTBR1.
	 */

	/* Disable Branch Prediction, Z bit in SCTLR. */
	mrc	p15, 0, r6, c1, c0, 0
	bic	r6, r6, #0x800
	mcr	p15, 0, r6, c1, c0, 0

	/* Flush the BTAC. */
	ldr	r6, =0x0
	mcr	p15, 0, r6, c7, c1, 6

	ldr	r6, =iram_tlb_phys_addr
	ldr	r7, [r6]			@ r7 = phys addr of IRAM tables

	dsb
	isb

	/* Store the IRAM table in TTBR1 */
	mcr	p15, 0, r7, c2, c0, 1
	/* Read TTBCR and set PD0=1, N = 1 */
	mrc	p15, 0, r6, c2, c0, 2
	orr	r6, r6, #0x11
	mcr	p15, 0, r6, c2, c0, 2

	dsb
	isb

	/* flush the TLB */
	ldr	r6, =0x0
	mcr	p15, 0, r6, c8, c3, 0

	.endm
	.macro tlb_back_to_ddr

	/* undo tlb_set_to_ocram: normal TTBR0 walks, original TTBR1 */
	/* Read TTBCR and set PD0=0, N = 0 */
	mrc	p15, 0, r6, c2, c0, 2
	bic	r6, r6, #0x11
	mcr	p15, 0, r6, c2, c0, 2

	dsb
	isb

	/* flush the TLB */
	ldr	r6, =0x0
	mcr	p15, 0, r6, c8, c3, 0

	/* Enable Branch Prediction, Z bit in SCTLR. */
	mrc	p15, 0, r6, c1, c0, 0
	orr	r6, r6, #0x800
	mcr	p15, 0, r6, c1, c0, 0

	/* Flush the Branch Target Address Cache (BTAC) */
	ldr	r6, =0x0
	mcr	p15, 0, r6, c7, c1, 6

	/* restore ttbr */
	ldr	r7, [r0, #PM_INFO_PM_INFO_TTBR_OFFSET]
	mcr	p15, 0, r7, c2, c0, 1

	.endm
/* r10 must be DDRC base address */
	.macro ddrc_enter_self_refresh

	ldr	r10, [r0, #PM_INFO_MX7D_DDRC_V_OFFSET]

	/* disable port */
	ldr	r7, =0x0
	str	r7, [r10, #DDRC_PCTRL_0]

	/* clear PWRCTL so DDR is not in self-refresh while draining
	 * (original comment said "out of self-refresh") */
	ldr	r7, =0x0
	str	r7, [r10, #DDRC_PWRCTL]

	/* wait rw port_busy clear */
	ldr	r6, =(0x1 << 16)
	orr	r6, r6, #0x1
2:
	ldr	r7, [r10, #DDRC_PSTAT]
	ands	r7, r7, r6
	bne	2b

	ldr	r7, =0x1
	str	r7, [r10, #DDRC_DBG1]

	/* wait for the debug CAM to report empty/drained
	 * NOTE(review): 0x36000000 = DBGCAM empty bits; confirm vs RM */
	ldr	r6, =0x36000000
11:
	ldr	r7, [r10, #DDRC_DBGCAM]
	and	r7, r7, r6
	cmp	r7, r6
	bne	11b

	/* enter self-refresh bit 5 */
	ldr	r7, =(0x1 << 5)
	str	r7, [r10, #DDRC_PWRCTL]

	/* wait until self-refresh mode entered */
3:
	ldr	r7, [r10, #DDRC_STAT]
	and	r7, r7, #0x3
	cmp	r7, #0x3
	bne	3b
4:
	ldr	r7, [r10, #DDRC_STAT]
	ands	r7, r7, #0x20
	beq	4b

	/* disable dram clk */
	ldr	r7, [r10, #DDRC_PWRCTL]
	orr	r7, r7, #(1 << 3)
	str	r7, [r10, #DDRC_PWRCTL]

	/*
	 * TO1.1 adds feature of DDR pads power down,
	 * although TO1.0 has no such function, but it is
	 * NOT harmful to program GPR registers for TO1.0,
	 * it can avoid the logic of version check in idle
	 * thread.
	 */
	ldr	r10, [r0, #PM_INFO_MX7D_IOMUXC_GPR_V_OFFSET]
	ldr	r7, =0xf0000
	str	r7, [r10]

	/* delay 20us, measured by gpio */
	ldr	r7, =20
12:
	subs	r7, r7, #0x1
	bne	12b

	.endm
/* r10 must be DDRC base address */
	.macro ddrc_exit_self_refresh

	/* r5 selects address space: 1 = physical (MMU off), else virtual */
	cmp	r5, #0x1
	ldreq	r10, [r0, #PM_INFO_MX7D_IOMUXC_GPR_P_OFFSET]
	ldrne	r10, [r0, #PM_INFO_MX7D_IOMUXC_GPR_V_OFFSET]

	/* power DDR pads back up (undo the GPR write in enter) */
	ldr	r7, =0x0
	str	r7, [r10]

	ldr	r7, =20
13:
	subs	r7, r7, #0x1
	bne	13b

	cmp	r5, #0x1
	ldreq	r10, [r0, #PM_INFO_MX7D_DDRC_P_OFFSET]
	ldrne	r10, [r0, #PM_INFO_MX7D_DDRC_V_OFFSET]

	ldr	r7, =0x0
	str	r7, [r10, #DDRC_DBG1]

	ldr	r6, =0x30000000
14:
	ldr	r7, [r10, #DDRC_DBGCAM]
	and	r7, r7, r6
	cmp	r7, r6
	bne	14b

	/* let DDR out of self-refresh */
	ldr	r7, =0x0
	str	r7, [r10, #DDRC_PWRCTL]

	/* wait until self-refresh mode exited */
5:
	ldr	r7, [r10, #DDRC_STAT]
	and	r7, r7, #0x3
	cmp	r7, #0x3
	beq	5b

	/* enable auto self-refresh */
	ldr	r7, [r10, #DDRC_PWRCTL]
	orr	r7, r7, #(1 << 0)
	str	r7, [r10, #DDRC_PWRCTL]

	/* re-enable port 0 */
	ldr	r7, =0x1
	str	r7, [r10, #DDRC_PCTRL_0]

	.endm
	/* spin until PLL at [r10 + r8] reports lock (bit 31) */
	.macro pll_do_wait_lock
6:
	ldr	r7, [r10, r8]
	ands	r7, #0x80000000
	beq	6b

	.endm
	/* Drop ARM/AXI/AHB clocks to OSC speed, gate DRAM clock and
	 * power down the ARM/480M/DRAM PLLs for low power idle. */
	.macro ccm_enter_idle

	ldr	r10, [r0, #PM_INFO_MX7D_ANATOP_V_OFFSET]

	/* ungate pfd1 332m for lower axi */
	ldr	r7, =0x8000
	str	r7, [r10, #0xc8]

	ldr	r10, [r0, #PM_INFO_MX7D_CCM_V_OFFSET]

	/* switch ARM CLK to OSC */
	ldr	r8, =0x8000
	ldr	r7, [r10, r8]
	bic	r7, r7, #0x7000000
	str	r7, [r10, r8]

	/* lower AXI clk from 24MHz to 3MHz */
	ldr	r8, =0x8800
	ldr	r7, [r10, r8]
	orr	r7, r7, #0x7
	str	r7, [r10, r8]

	/* lower AHB clk from 24MHz to 3MHz */
	ldr	r8, =0x9000
	ldr	r7, [r10, r8]
	orr	r7, r7, #0x7
	str	r7, [r10, r8]

	/* gate dram clk */
	ldr	r8, =0x9880
	ldr	r7, [r10, r8]
	bic	r7, r7, #0x10000000
	str	r7, [r10, r8]

	ldr	r10, [r0, #PM_INFO_MX7D_ANATOP_V_OFFSET]

	/* gate pfd1 332m */
	ldr	r7, =0x8000
	str	r7, [r10, #0xc4]

	/* gate system pll pfd div 1 */
	ldr	r7, =0x10
	str	r7, [r10, #0xb4]

	/* power down ARM, 480 and DRAM PLL */
	ldr	r7, =0x1000
	str	r7, [r10, #0x64]
	str	r7, [r10, #0xb4]
	ldr	r7, =0x100000
	str	r7, [r10, #0x74]

	.endm
	/* Reverse of ccm_enter_idle: power PLLs back up (waiting for
	 * lock), restore ARM/AXI/AHB clocks and ungate DRAM clock.
	 * r5 selects physical (1) or virtual register addressing. */
	.macro ccm_exit_idle

	cmp	r5, #0x1
	ldreq	r10, [r0, #PM_INFO_MX7D_ANATOP_P_OFFSET]
	ldrne	r10, [r0, #PM_INFO_MX7D_ANATOP_V_OFFSET]

	/* power up ARM, 480 and DRAM PLL */
	ldr	r7, =0x1000
	str	r7, [r10, #0x68]
	ldr	r8, =0x60
	pll_do_wait_lock

	ldr	r7, =0x1000
	str	r7, [r10, #0xb8]
	ldr	r8, =0xb0
	pll_do_wait_lock

	ldr	r7, =0x100000
	str	r7, [r10, #0x78]
	ldr	r8, =0x70
	pll_do_wait_lock

	/* ungate pfd1 332m for lower axi */
	ldr	r7, =0x8000
	str	r7, [r10, #0xc8]

	/* ungate system pll pfd div 1 */
	ldr	r7, =0x10
	str	r7, [r10, #0xb8]

	cmp	r5, #0x1
	ldreq	r10, [r0, #PM_INFO_MX7D_CCM_P_OFFSET]
	ldrne	r10, [r0, #PM_INFO_MX7D_CCM_V_OFFSET]

	/* switch ARM CLK to PLL */
	ldr	r8, =0x8000
	ldr	r7, [r10, r8]
	orr	r7, r7, #0x1000000
	str	r7, [r10, r8]

	/* restore AXI clk from 3MHz to 24MHz */
	ldr	r8, =0x8800
	ldr	r7, [r10, r8]
	bic	r7, r7, #0x7
	str	r7, [r10, r8]

	/* restore AHB clk from 3MHz to 24MHz */
	ldr	r8, =0x9000
	ldr	r7, [r10, r8]
	bic	r7, r7, #0x7
	str	r7, [r10, r8]

	/* ungate dram clk */
	ldr	r8, =0x9880
	ldr	r7, [r10, r8]
	orr	r7, r7, #0x10000000
	str	r7, [r10, r8]

	cmp	r5, #0x1
	ldreq	r10, [r0, #PM_INFO_MX7D_ANATOP_P_OFFSET]
	ldrne	r10, [r0, #PM_INFO_MX7D_ANATOP_V_OFFSET]

	/* gate pfd1 332m for lower axi */
	ldr	r7, =0x8000
	str	r7, [r10, #0xc4]

	.endm
	/* Switch to RC oscillator, power down the XTAL, drop the LDOs
	 * and move to the low power bandgap. */
	.macro anatop_enter_idle

	ldr	r10, [r0, #PM_INFO_MX7D_ANATOP_V_OFFSET]

	/* XTAL to RC-OSC switch */
	ldr	r7, [r10]
	orr	r7, r7, #0x1000
	str	r7, [r10]

	/* power down XTAL */
	ldr	r7, [r10]
	orr	r7, r7, #0x1
	str	r7, [r10]

	/* enable weak 1P0A */
	ldr	r7, [r10, #0x200]
	orr	r7, r7, #0x40000
	str	r7, [r10, #0x200]

	/* disable LDO 1P0A */
	ldr	r7, [r10, #0x200]
	bic	r7, r7, #0x1
	str	r7, [r10, #0x200]

	/* disable LDO 1P0D */
	ldr	r7, [r10, #0x210]
	bic	r7, r7, #0x1
	str	r7, [r10, #0x210]

	/* disable LDO 1P2 */
	ldr	r7, [r10, #0x220]
	bic	r7, r7, #0x1
	str	r7, [r10, #0x220]

	/* switch to low power bandgap */
	ldr	r7, [r10, #0x270]
	orr	r7, r7, #0x400
	str	r7, [r10, #0x270]

	/* power down normal bandgap */
	orr	r7, r7, #0x1
	str	r7, [r10, #0x270]

	.endm
	/* Reverse of anatop_enter_idle: bandgap, LDOs (each with a
	 * ready-poll on bit 17), XTAL power-up, switch back to XTAL.
	 * r5 selects physical (1) or virtual register addressing. */
	.macro anatop_exit_idle

	cmp	r5, #0x1
	ldreq	r10, [r0, #PM_INFO_MX7D_ANATOP_P_OFFSET]
	ldrne	r10, [r0, #PM_INFO_MX7D_ANATOP_V_OFFSET]

	/* power on normal bandgap */
	ldr	r7, [r10, #0x270]
	bic	r7, r7, #0x1
	str	r7, [r10, #0x270]

	/* switch to normal bandgap */
	bic	r7, r7, #0x400
	str	r7, [r10, #0x270]

	/* enable LDO 1P2 */
	ldr	r7, [r10, #0x220]
	orr	r7, r7, #0x1
	str	r7, [r10, #0x220]
7:
	ldr	r7, [r10, #0x220]
	ands	r7, #0x20000
	beq	7b

	/* enable LDO 1P0D */
	ldr	r7, [r10, #0x210]
	orr	r7, r7, #0x1
	str	r7, [r10, #0x210]
8:
	ldr	r7, [r10, #0x210]
	ands	r7, #0x20000
	beq	8b

	/* enable LDO 1P0A */
	ldr	r7, [r10, #0x200]
	orr	r7, r7, #0x1
	str	r7, [r10, #0x200]
9:
	ldr	r7, [r10, #0x200]
	ands	r7, #0x20000
	beq	9b

	/* disable weak 1P0A */
	ldr	r7, [r10, #0x200]
	bic	r7, r7, #0x40000
	str	r7, [r10, #0x200]

	/* power up XTAL and wait */
	ldr	r7, [r10]
	bic	r7, r7, #0x1
	str	r7, [r10]
10:
	ldr	r7, [r10]
	ands	r7, r7, #0x4
	beq	10b

	/* RC-OSC to XTAL switch */
	ldr	r7, [r10]
	bic	r7, r7, #0x1000
	str	r7, [r10]

	.endm
	.extern iram_tlb_phys_addr

	.align 3
/*
 * imx7d_low_power_idle(pm_info)
 *
 * r0 = virtual address of the pm_info structure (see PM_INFO_* offsets).
 * The last CPU to enter (num_online == num_lpi) also takes DDR, CCM,
 * ANATOP and the GIC down; the first CPU to wake brings them back.
 * CPU0/CPU1 serialize through the Peterson lock (imx_pen_lock/unlock).
 */
ENTRY(imx7d_low_power_idle)
	push	{r0 - r12}

	/* get necessary info from pm_info */
	ldr	r1, [r0, #PM_INFO_PBASE_OFFSET]
	ldr	r2, [r0, #PM_INFO_PM_INFO_SIZE_OFFSET]

	/*
	 * counting the resume address in iram
	 * to set it in SRC register.
	 */
	ldr	r5, =imx7d_low_power_idle
	ldr	r6, =wakeup
	sub	r6, r6, r5		@ r6 = offset of 'wakeup' in this code
	add	r8, r1, r2		@ r8 = phys base of code (after pm_info)
	add	r3, r8, r6		@ r3 = phys addr of 'wakeup'

	/* r11 is cpu id */
	mrc	p15, 0, r11, c0, c0, 5
	and	r11, r11, #3
	cmp	r11, #0x0
	ldreq	r6, =MX7D_SRC_GPR1
	ldreq	r7, =MX7D_SRC_GPR2
	ldrne	r6, =MX7D_SRC_GPR3
	ldrne	r7, =MX7D_SRC_GPR4

	/* store physical resume addr and pm_info address. */
	ldr	r10, [r0, #PM_INFO_MX7D_SRC_V_OFFSET]
	str	r3, [r10, r6]
	str	r1, [r10, r7]

	disable_l1_dcache

	tlb_set_to_ocram

	/* check last to sleep */
	ldr	r6, [r0, #PM_INFO_PM_INFO_NUM_ONLINE_CPUS_OFFSET]
	ldr	r7, [r0, #PM_INFO_PM_INFO_NUM_LPI_CPUS_OFFSET]
	cmp	r6, r7
	bne	lpi_enter_done

	/* last CPU in: take DDR, clocks and analog down */
	ddrc_enter_self_refresh
	ccm_enter_idle
	anatop_enter_idle

	/* disable GIC distributor (write 0 at dist base + 0x1000)
	 * NOTE(review): presumably GICD_CTLR at that offset -- confirm */
	ldr	r10, [r0, #PM_INFO_MX7D_GIC_DIST_V_OFFSET]
	ldr	r7, =0x0
	ldr	r8, =0x1000
	str	r7, [r10, r8]

	/* save GPC IMRs in r4-r7, then mask everything */
	ldr	r10, [r0, #PM_INFO_MX7D_GPC_V_OFFSET]
	ldr	r4, [r10, #MX7D_GPC_IMR1]
	ldr	r5, [r10, #MX7D_GPC_IMR2]
	ldr	r6, [r10, #MX7D_GPC_IMR3]
	ldr	r7, [r10, #MX7D_GPC_IMR4]

	ldr	r8, =0xffffffff
	str	r8, [r10, #MX7D_GPC_IMR1]
	str	r8, [r10, #MX7D_GPC_IMR2]
	str	r8, [r10, #MX7D_GPC_IMR3]
	str	r8, [r10, #MX7D_GPC_IMR4]

	/*
	 * enable the RBC bypass counter here
	 * to hold off the interrupts. RBC counter
	 * = 8 (240us). With this setting, the latency
	 * from wakeup interrupt to ARM power up
	 * is ~250uS.
	 */
	ldr	r8, [r10, #0x14]
	bic	r8, r8, #(0x3f << 24)
	orr	r8, r8, #(0x8 << 24)
	str	r8, [r10, #0x14]

	/* enable the counter. */
	ldr	r8, [r10, #0x14]
	orr	r8, r8, #(0x1 << 30)
	str	r8, [r10, #0x14]

	/* unmask all the GPC interrupts. */
	str	r4, [r10, #MX7D_GPC_IMR1]
	str	r5, [r10, #MX7D_GPC_IMR2]
	str	r6, [r10, #MX7D_GPC_IMR3]
	str	r7, [r10, #MX7D_GPC_IMR4]

	/*
	 * now delay for a short while (30usec)
	 * ARM is at 24MHz at this point
	 * so a short loop should be enough.
	 * this delay is required to ensure that
	 * the RBC counter can start counting in
	 * case an interrupt is already pending
	 * or in case an interrupt arrives just
	 * as ARM is about to assert DSM_request.
	 */
	ldr	r4, =5
rbc_loop:
	subs	r4, r4, #0x1
	bne	rbc_loop

lpi_enter_done:
	imx_pen_unlock

	wfi

	/* pad the wfi with nops (wake may restart a few insns later) */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	imx_pen_lock

	/* check first to wake */
	ldr	r6, [r0, #PM_INFO_PM_INFO_NUM_ONLINE_CPUS_OFFSET]
	ldr	r7, [r0, #PM_INFO_PM_INFO_NUM_LPI_CPUS_OFFSET]
	cmp	r6, r7
	bne	skip_lpi_flow

	/* r5 = 0: exit macros use virtual addresses (MMU still on) */
	ldr	r5, =0x0
	anatop_exit_idle
	ccm_exit_idle
	ddrc_exit_self_refresh

	/* re-enable GIC distributor */
	ldr	r10, [r0, #PM_INFO_MX7D_GIC_DIST_V_OFFSET]
	ldr	r7, =0x1
	ldr	r8, =0x1000
	str	r7, [r10, r8]

skip_lpi_flow:
	tlb_back_to_ddr

#ifdef CONFIG_SMP
	/* Turn on SMP bit. */
	mrc	p15, 0, r7, c1, c0, 1
	orr	r7, r7, #0x40
	mcr	p15, 0, r7, c1, c0, 1

	isb
#endif

	/* enable d-cache */
	mrc	p15, 0, r7, c1, c0, 0
	orr	r7, r7, #(1 << 2)
	mcr	p15, 0, r7, c1, c0, 0
	dsb
	isb

	/* Restore registers */
	pop	{r0 - r12}
	mov	pc, lr

	/* entered from reset via SRC GPR when the core was power-gated */
wakeup:
	/* invalidate L1 I-cache first */
	mov	r1, #0x0
	mcr	p15, 0, r1, c7, c5, 0
	mcr	p15, 0, r1, c7, c5, 0
	mcr	p15, 0, r1, c7, c5, 6
	/* enable the Icache and branch prediction */
	mov	r1, #0x1800
	mcr	p15, 0, r1, c1, c0, 0
	isb

	imx_pen_lock

	/* check first to wake */
	ldr	r6, [r0, #PM_INFO_PM_INFO_NUM_ONLINE_CPUS_OFFSET]
	ldr	r7, [r0, #PM_INFO_PM_INFO_NUM_LPI_CPUS_OFFSET]
	cmp	r6, r7
	bne	wakeup_skip_lpi_flow

	/* r5 = 1: exit macros use physical addresses (MMU off here) */
	ldr	r5, =0x1
	anatop_exit_idle
	ccm_exit_idle
	ddrc_exit_self_refresh

wakeup_skip_lpi_flow:
	/* get physical resume address from pm_info. */
	ldr	lr, [r0, #PM_INFO_RESUME_ADDR_OFFSET]

	/* Restore registers */
	mov	pc, lr
	.ltorg
ENDPROC(imx7d_low_power_idle)

View File

@ -13,6 +13,13 @@
#include <asm/mach/map.h>
#include "common.h"
#include "cpuidle.h"
/*
 * A ready-made "status = disabled" property, attached at runtime to DT
 * nodes that should be skipped (the arch timer in the !SMP case below).
 */
static struct property device_disabled = {
	.name = "status",
	.length = sizeof("disabled"),
	.value = "disabled",
};
static int ar8031_phy_fixup(struct phy_device *dev)
{
@ -76,6 +83,17 @@ static inline void imx7d_enet_init(void)
imx7d_enet_clk_sel();
}
/*
 * Mark the ARMv7 architected timer node "disabled" so its driver is not
 * probed; called only in the !CONFIG_SMP case (see imx7d_init_irq()).
 */
static inline void imx7d_disable_arm_arch_timer(void)
{
	struct device_node *node;

	node = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
	if (node) {
		pr_info("disable arm arch timer for nosmp!\n");
		of_add_property(node, &device_disabled);
		/* release the ref taken by of_find_compatible_node() */
		of_node_put(node);
	}
}
static void __init imx7d_init_machine(void)
{
struct device *parent;
@ -85,20 +103,27 @@ static void __init imx7d_init_machine(void)
pr_warn("failed to initialize soc device\n");
imx_anatop_init();
of_platform_default_populate(NULL, NULL, parent);
imx7d_pm_init();
imx7d_enet_init();
}
/* Late init: register cpuidle states and, if enabled, the cpufreq-dt
 * glue device. */
static void __init imx7d_init_late(void)
{
	imx7d_cpuidle_init();

	if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT))
		platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
}
/*
 * IRQ init: validate/map the GPC from DT, read the SoC revision, probe
 * the SRC (also detects whether the M4 is running), then bring up the
 * irqchips.  Without SMP the arch timer is disabled (broadcast timer
 * path is used instead -- NOTE(review): inferred from !SMP guard).
 */
static void __init imx7d_init_irq(void)
{
	imx_gpcv2_check_dt();
	imx_init_revision_from_anatop();
	imx_src_init();
	irqchip_init();
#ifndef CONFIG_SMP
	imx7d_disable_arm_arch_timer();
#endif
}
static void __init imx7d_map_io(void)
@ -116,6 +141,7 @@ static const char *const imx7d_dt_compat[] __initconst = {
DT_MACHINE_START(IMX7D, "Freescale i.MX7 Dual (Device Tree)")
.map_io = imx7d_map_io,
.smp = smp_ops(imx_smp_ops),
.init_irq = imx7d_init_irq,
.init_machine = imx7d_init_machine,
.init_late = imx7d_init_late,

View File

@ -47,15 +47,39 @@ static int imx_boot_secondary(unsigned int cpu, struct task_struct *idle)
return 0;
}
#define MXC_ARCH_CA7 0xc07
static unsigned long __mxc_arch_type;
static inline bool arm_is_ca7(void)
{
return __mxc_arch_type == MXC_ARCH_CA7;
}
/*
* Initialise the CPU possible map early - this describes the CPUs
* which may be present or become present in the system.
*/
static void __init imx_smp_init_cpus(void)
{
unsigned long arch_type;
int i, ncores;
ncores = scu_get_core_count(scu_base);
asm volatile(
".align 4\n"
"mrc p15, 0, %0, c0, c0, 0\n"
: "=r" (arch_type)
);
/* MIDR[15:4] defines ARCH type */
__mxc_arch_type = (arch_type >> 4) & 0xfff;
if (arm_is_ca7()) {
unsigned long val;
/* CA7 core number, [25:24] of CP15 L2CTLR */
asm volatile("mrc p15, 1, %0, c9, c0, 2" : "=r" (val));
ncores = ((val >> 24) & 0x3) + 1;
} else {
ncores = scu_get_core_count(scu_base);
}
for (i = ncores; i < NR_CPUS; i++)
set_cpu_possible(i, false);
@ -63,11 +87,15 @@ static void __init imx_smp_init_cpus(void)
void imx_smp_prepare(void)
{
if (arm_is_ca7())
return;
scu_enable(scu_base);
}
static void __init imx_smp_prepare_cpus(unsigned int max_cpus)
{
if (arm_is_ca7())
return;
imx_smp_prepare();
/*

File diff suppressed because it is too large Load Diff

View File

@ -24,9 +24,17 @@
#define BP_SRC_SCR_SW_IPU2_RST 12
#define BP_SRC_SCR_CORE1_RST 14
#define BP_SRC_SCR_CORE1_ENABLE 22
/* below is for i.MX7D */
#define SRC_GPR1_V2 0x074
#define SRC_A7RCR0 0x004
#define SRC_A7RCR1 0x008
#define SRC_M4RCR 0x00C
#define BP_SRC_A7RCR0_A7_CORE_RESET0 0
#define BP_SRC_A7RCR1_A7_CORE1_ENABLE 1
static void __iomem *src_base;
static DEFINE_SPINLOCK(scr_lock);
static DEFINE_SPINLOCK(src_lock);
static bool m4_is_enabled;
static const int sw_reset_bits[5] = {
@ -58,11 +66,11 @@ static int imx_src_reset_module(struct reset_controller_dev *rcdev,
bit = 1 << sw_reset_bits[sw_reset_idx];
spin_lock_irqsave(&scr_lock, flags);
spin_lock_irqsave(&src_lock, flags);
val = readl_relaxed(src_base + SRC_SCR);
val |= bit;
writel_relaxed(val, src_base + SRC_SCR);
spin_unlock_irqrestore(&scr_lock, flags);
spin_unlock_irqrestore(&src_lock, flags);
timeout = jiffies + msecs_to_jiffies(1000);
while (readl(src_base + SRC_SCR) & bit) {
@ -88,32 +96,59 @@ void imx_enable_cpu(int cpu, bool enable)
u32 mask, val;
cpu = cpu_logical_map(cpu);
mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1);
spin_lock(&scr_lock);
val = readl_relaxed(src_base + SRC_SCR);
val = enable ? val | mask : val & ~mask;
val |= 1 << (BP_SRC_SCR_CORE1_RST + cpu - 1);
writel_relaxed(val, src_base + SRC_SCR);
spin_unlock(&scr_lock);
spin_lock(&src_lock);
if (cpu_is_imx7d()) {
/* enable core */
if (enable)
imx_gpcv2_set_core1_pdn_pup_by_software(false);
mask = 1 << (BP_SRC_A7RCR1_A7_CORE1_ENABLE + cpu - 1);
val = readl_relaxed(src_base + SRC_A7RCR1);
val = enable ? val | mask : val & ~mask;
writel_relaxed(val, src_base + SRC_A7RCR1);
} else {
mask = 1 << (BP_SRC_SCR_CORE1_ENABLE + cpu - 1);
val = readl_relaxed(src_base + SRC_SCR);
val = enable ? val | mask : val & ~mask;
val |= 1 << (BP_SRC_SCR_CORE1_RST + cpu - 1);
writel_relaxed(val, src_base + SRC_SCR);
}
spin_unlock(&src_lock);
}
void imx_set_cpu_jump(int cpu, void *jump_addr)
{
spin_lock(&src_lock);
cpu = cpu_logical_map(cpu);
writel_relaxed(__pa_symbol(jump_addr),
src_base + SRC_GPR1 + cpu * 8);
if (cpu_is_imx7d())
writel_relaxed(__pa_symbol(jump_addr),
src_base + SRC_GPR1_V2 + cpu * 8);
else
writel_relaxed(__pa_symbol(jump_addr),
src_base + SRC_GPR1 + cpu * 8);
spin_unlock(&src_lock);
}
u32 imx_get_cpu_arg(int cpu)
{
cpu = cpu_logical_map(cpu);
return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4);
if (cpu_is_imx7d())
return readl_relaxed(src_base + SRC_GPR1_V2
+ cpu * 8 + 4);
else
return readl_relaxed(src_base + SRC_GPR1
+ cpu * 8 + 4);
}
void imx_set_cpu_arg(int cpu, u32 arg)
{
cpu = cpu_logical_map(cpu);
writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4);
if (cpu_is_imx7d())
writel_relaxed(arg, src_base + SRC_GPR1_V2
+ cpu * 8 + 4);
else
writel_relaxed(arg, src_base + SRC_GPR1
+ cpu * 8 + 4);
}
void __init imx_src_init(void)
@ -127,6 +162,15 @@ void __init imx_src_init(void)
src_base = of_iomap(np, 0);
WARN_ON(!src_base);
if (cpu_is_imx7d()) {
val = readl_relaxed(src_base + SRC_M4RCR);
if (((val & BIT(3)) == BIT(3)) && !(val & BIT(0)))
m4_is_enabled = true;
else
m4_is_enabled = false;
return;
}
imx_reset_controller.of_node = np;
if (IS_ENABLED(CONFIG_RESET_CONTROLLER))
reset_controller_register(&imx_reset_controller);
@ -135,7 +179,7 @@ void __init imx_src_init(void)
* force warm reset sources to generate cold reset
* for a more reliable restart
*/
spin_lock(&scr_lock);
spin_lock(&src_lock);
val = readl_relaxed(src_base + SRC_SCR);
/* bit 4 is m4c_non_sclr_rst on i.MX6SX */
@ -147,5 +191,5 @@ void __init imx_src_init(void)
val &= ~(1 << BP_SRC_SCR_WARM_RESET_ENABLE);
writel_relaxed(val, src_base + SRC_SCR);
spin_unlock(&scr_lock);
spin_unlock(&src_lock);
}

View File

@ -0,0 +1,714 @@
/*
* Copyright (C) 2015 Freescale Semiconductor, Inc.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include "hardware.h"
/*
* ==================== low level suspend ====================
*
* Better to follow below rules to use ARM registers:
* r0: pm_info structure address;
* r1 ~ r4: for saving pm_info members;
* r5 ~ r10: free registers;
* r11: io base address.
*
* suspend ocram space layout:
* ======================== high address ======================
* .
* .
* .
* ^
* ^
* ^
* imx7_suspend code
* PM_INFO structure(imx7_cpu_pm_info)
* ======================== low address =======================
*/
/*
* Below offsets are based on struct imx7_cpu_pm_info
* which defined in arch/arm/mach-imx/pm-imx7.c, this
* structure contains necessary pm info for low level
* suspend related code.
*/
/*
 * Byte offsets into struct imx7_cpu_pm_info (see the header comment
 * above: defined in arch/arm/mach-imx/pm-imx7.c) -- keep in sync.
 */
#define PM_INFO_M4_RESERVE0_OFFSET 0x0
#define PM_INFO_M4_RESERVE1_OFFSET 0x4
#define PM_INFO_M4_RESERVE2_OFFSET 0x8
#define PM_INFO_PBASE_OFFSET 0xc
#define PM_INFO_RESUME_ADDR_OFFSET 0x10
#define PM_INFO_DDR_TYPE_OFFSET 0x14
#define PM_INFO_PM_INFO_SIZE_OFFSET 0x18
/* _P_ = physical, _V_ = virtual base address of each peripheral */
#define PM_INFO_MX7_DDRC_P_OFFSET 0x1c
#define PM_INFO_MX7_DDRC_V_OFFSET 0x20
#define PM_INFO_MX7_DDRC_PHY_P_OFFSET 0x24
#define PM_INFO_MX7_DDRC_PHY_V_OFFSET 0x28
#define PM_INFO_MX7_SRC_P_OFFSET 0x2c
#define PM_INFO_MX7_SRC_V_OFFSET 0x30
#define PM_INFO_MX7_IOMUXC_GPR_P_OFFSET 0x34
#define PM_INFO_MX7_IOMUXC_GPR_V_OFFSET 0x38
#define PM_INFO_MX7_CCM_P_OFFSET 0x3c
#define PM_INFO_MX7_CCM_V_OFFSET 0x40
#define PM_INFO_MX7_GPC_P_OFFSET 0x44
#define PM_INFO_MX7_GPC_V_OFFSET 0x48
#define PM_INFO_MX7_SNVS_P_OFFSET 0x4c
#define PM_INFO_MX7_SNVS_V_OFFSET 0x50
#define PM_INFO_MX7_ANATOP_P_OFFSET 0x54
#define PM_INFO_MX7_ANATOP_V_OFFSET 0x58
#define PM_INFO_MX7_LPSR_P_OFFSET 0x5c
#define PM_INFO_MX7_LPSR_V_OFFSET 0x60
#define PM_INFO_MX7_GIC_DIST_P_OFFSET 0x64
#define PM_INFO_MX7_GIC_DIST_V_OFFSET 0x68
#define PM_INFO_MX7_TTBR1_V_OFFSET 0x6c
/* saved DDRC / DDR PHY register tables (count, then reg/value pairs) */
#define PM_INFO_DDRC_REG_NUM_OFFSET 0x70
#define PM_INFO_DDRC_REG_OFFSET 0x74
#define PM_INFO_DDRC_VALUE_OFFSET 0x78
#define PM_INFO_DDRC_PHY_REG_NUM_OFFSET 0x174
#define PM_INFO_DDRC_PHY_REG_OFFSET 0x178
#define PM_INFO_DDRC_PHY_VALUE_OFFSET 0x17c

/* SRC GPRs holding resume address / pm_info pointer */
#define MX7_SRC_GPR1 0x74
#define MX7_SRC_GPR2 0x78
/* GPC power gate controls */
#define GPC_PGC_C0 0x800
#define GPC_PGC_FM 0xa00
/* ANATOP registers */
#define ANADIG_SNVS_MISC_CTRL 0x380
#define ANADIG_SNVS_MISC_CTRL_SET 0x384
#define ANADIG_SNVS_MISC_CTRL_CLR 0x388
#define ANADIG_DIGPROG 0x800
/* DDR controller / PHY registers */
#define DDRC_STAT 0x4
#define DDRC_PWRCTL 0x30
#define DDRC_PSTAT 0x3fc
#define DDRC_PCTRL_0 0x490
#define DDRC_DFIMISC 0x1b0
#define DDRC_SWCTL 0x320
#define DDRC_SWSTAT 0x324
#define DDRPHY_LP_CON0 0x18
/* CCM / GPC */
#define CCM_SNVS_LPCG 0x250
#define MX7D_GPC_IMR1 0x30
#define MX7D_GPC_IMR2 0x34
#define MX7D_GPC_IMR3 0x38
#define MX7D_GPC_IMR4 0x3c
.align 3
/*
 * disable_l1_dcache - flush the L1 data cache and disable it.
 *
 * Flush, clear SCTLR.C, then flush again to catch any lines that were
 * allocated while the first flush was still running.
 * Clobbers: r7 (other registers are saved around the flush calls), flags.
 */
.macro disable_l1_dcache
/*
 * Flush all data from the L1 data cache before disabling
 * SCTLR.C bit.
 */
push {r0 - r10, lr}
ldr r7, =v7_flush_dcache_all
mov lr, pc /* manual BL: set up return address, then jump */
mov pc, r7
pop {r0 - r10, lr}
/* disable d-cache: clear SCTLR.C (bit 2) */
mrc p15, 0, r7, c1, c0, 0
bic r7, r7, #(1 << 2)
mcr p15, 0, r7, c1, c0, 0
dsb
isb
/* flush again so nothing dirty remains now that the cache is off */
push {r0 - r10, lr}
ldr r7, =v7_flush_dcache_all
mov lr, pc
mov pc, r7
pop {r0 - r10, lr}
.endm
/*
 * store_ttbr1 - save the current TTBR1 into pm_info and switch
 * translation to the IRAM page table, so code keeps executing with
 * valid mappings while DDR is unavailable.
 * In:  r0 = pm_info base (virtual).
 * Clobbers: r6, r7, flags.
 */
.macro store_ttbr1
/* Store TTBR1 to pm_info->ttbr1 */
mrc p15, 0, r7, c2, c0, 1
str r7, [r0, #PM_INFO_MX7_TTBR1_V_OFFSET]
/* Disable Branch Prediction, Z bit in SCTLR. */
mrc p15, 0, r6, c1, c0, 0
bic r6, r6, #0x800
mcr p15, 0, r6, c1, c0, 0
/* Flush the BTAC. */
ldr r6, =0x0
mcr p15, 0, r6, c7, c1, 6
/* load the physical address of the IRAM page table */
ldr r6, =iram_tlb_phys_addr
ldr r6, [r6]
dsb
isb
/* Store the IRAM table in TTBR1 */
mcr p15, 0, r6, c2, c0, 1
/* Read TTBCR and set PD0=1, N = 1 (all translations now use TTBR1) */
mrc p15, 0, r6, c2, c0, 2
orr r6, r6, #0x11
mcr p15, 0, r6, c2, c0, 2
dsb
isb
/* flush the TLB so stale DDR-backed entries are dropped */
ldr r6, =0x0
mcr p15, 0, r6, c8, c3, 0
.endm
/*
 * restore_ttbr1 - inverse of store_ttbr1: re-enable the L1 data cache
 * and branch prediction, restore TTBCR to its default TTBR0 behaviour,
 * and reload the original TTBR1 saved in pm_info.
 * In:  r0 = pm_info base (virtual).
 * Clobbers: r6, r7, flags.
 */
.macro restore_ttbr1
/* Enable L1 data cache (SCTLR.C, bit 2). */
mrc p15, 0, r6, c1, c0, 0
orr r6, r6, #0x4
mcr p15, 0, r6, c1, c0, 0
dsb
isb
/* Restore TTBCR */
/* Read TTBCR and set PD0=0, N = 0 */
mrc p15, 0, r6, c2, c0, 2
bic r6, r6, #0x11
mcr p15, 0, r6, c2, c0, 2
dsb
isb
/* flush the TLB to drop IRAM-table entries */
ldr r6, =0x0
mcr p15, 0, r6, c8, c3, 0
/* Enable Branch Prediction, Z bit in SCTLR. */
mrc p15, 0, r6, c1, c0, 0
orr r6, r6, #0x800
mcr p15, 0, r6, c1, c0, 0
/* Flush the Branch Target Address Cache (BTAC) */
ldr r6, =0x0
mcr p15, 0, r6, c7, c1, 6
/* Restore TTBR1, get the origin ttbr1 from pm info */
ldr r7, [r0, #PM_INFO_MX7_TTBR1_V_OFFSET]
mcr p15, 0, r7, c2, c0, 1
.endm
/*
 * ddrc_enter_self_refresh - force the DDR controller into
 * self-refresh and stop the DRAM clock (contents retained, DDRC
 * itself stays powered).
 * In:  r0 = pm_info base (virtual).
 * Clobbers: r6, r7, r11, flags.
 */
.macro ddrc_enter_self_refresh
ldr r11, [r0, #PM_INFO_MX7_DDRC_V_OFFSET]
/* clear PWRCTL so no low-power mode is requested while draining */
ldr r7, =0x0
str r7, [r11, #DDRC_PWRCTL]
/* wait rw port_busy clear */
ldr r6, =(0x1 << 16)
orr r6, r6, #0x1
1:
ldr r7, [r11, #DDRC_PSTAT]
ands r7, r7, r6
bne 1b
/* enter self-refresh bit 5 */
ldr r7, =(0x1 << 5)
str r7, [r11, #DDRC_PWRCTL]
/* wait until self-refresh mode entered (STAT operating mode == 0x3) */
2:
ldr r7, [r11, #DDRC_STAT]
and r7, r7, #0x3
cmp r7, #0x3
bne 2b
/* also wait for STAT bit 5 (self-refresh type) to report */
3:
ldr r7, [r11, #DDRC_STAT]
ands r7, r7, #0x20
beq 3b
/* disable dram clk (PWRCTL bit 3), keeping self-refresh requested */
ldr r7, [r11, #DDRC_PWRCTL]
orr r7, r7, #(1 << 3)
str r7, [r11, #DDRC_PWRCTL]
.endm
/*
 * ddrc_exit_self_refresh - take the DDR controller back out of
 * self-refresh and re-enable automatic self-refresh.
 * In:  r0 = pm_info base;
 *      r5 = 0  -> use virtual DDRC base (normal WFI wakeup path),
 *      r5 != 0 -> use physical base (resume-from-DSM, MMU off).
 * Clobbers: r7, r11, flags.
 */
.macro ddrc_exit_self_refresh
cmp r5, #0x0
ldreq r11, [r0, #PM_INFO_MX7_DDRC_V_OFFSET]
ldrne r11, [r0, #PM_INFO_MX7_DDRC_P_OFFSET]
/* let DDR out of self-refresh: clear all PWRCTL requests */
ldr r7, =0x0
str r7, [r11, #DDRC_PWRCTL]
/* wait until self-refresh mode is exited (operating mode != 0x3) */
4:
ldr r7, [r11, #DDRC_STAT]
and r7, r7, #0x3
cmp r7, #0x3
beq 4b
/* enable auto self-refresh (PWRCTL bit 0) */
ldr r7, [r11, #DDRC_PWRCTL]
orr r7, r7, #(1 << 0)
str r7, [r11, #DDRC_PWRCTL]
.endm
/*
 * wait_delay - crude busy-wait loop.
 * In:  r6 = number of iterations (clobbered, ends at 0).
 * Clobbers flags.  Real-time delay depends on the current CPU clock;
 * callers size r6 for the frequency they expect.
 */
.macro wait_delay
5:
subs r6, r6, #0x1
bne 5b
.endm
/*
 * ddr_enter_retention - put DDR into self-refresh, then power down the
 * DDR PHY/pads so the whole DDR interface can be retained off (used
 * when the FastMix power domain is shut down).
 * In:  r0 = pm_info base (virtual addresses used throughout).
 * Clobbers: r6, r7, r10, r11, flags.
 */
.macro ddr_enter_retention
ldr r11, [r0, #PM_INFO_MX7_DDRC_V_OFFSET]
/* disable AXI port 0 so no new DDR transactions are accepted */
ldr r7, =0x0
str r7, [r11, #DDRC_PCTRL_0]
/* wait rw port_busy clear */
ldr r6, =(0x1 << 16)
orr r6, r6, #0x1
6:
ldr r7, [r11, #DDRC_PSTAT]
ands r7, r7, r6
bne 6b
ldr r11, [r0, #PM_INFO_MX7_DDRC_V_OFFSET]
/* enter self-refresh bit 5 */
ldr r7, =(0x1 << 5)
str r7, [r11, #DDRC_PWRCTL]
/* wait until self-refresh mode entered (operating mode == 0x3) */
7:
ldr r7, [r11, #DDRC_STAT]
and r7, r7, #0x3
cmp r7, #0x3
bne 7b
/* also wait for STAT bit 5 (self-refresh type) to report */
8:
ldr r7, [r11, #DDRC_STAT]
ands r7, r7, #0x20
beq 8b
/* disable dram clk (bit 3), keeping self-refresh (bit 5) requested */
ldr r7, =(0x1 << 5)
orr r7, r7, #(1 << 3)
str r7, [r11, #DDRC_PWRCTL]
/* read silicon revision from ANADIG_DIGPROG (0x11 == TO1.1) */
ldr r11, [r0, #PM_INFO_MX7_ANATOP_V_OFFSET]
ldr r7, [r11, #ANADIG_DIGPROG]
and r7, r7, #0xff
cmp r7, #0x11
bne 10f
/* TO 1.1 */
ldr r11, [r0, #PM_INFO_MX7_IOMUXC_GPR_V_OFFSET]
ldr r7, =0x38000000
str r7, [r11]
/* LPSR mode need to use TO1.0 flow as IOMUX lost power */
ldr r10, [r0, #PM_INFO_MX7_LPSR_V_OFFSET]
ldr r7, [r10]
cmp r7, #0x0
beq 11f
10:
/* reset ddr_phy */
ldr r11, [r0, #PM_INFO_MX7_ANATOP_V_OFFSET]
ldr r7, =0x0
str r7, [r11, #ANADIG_SNVS_MISC_CTRL]
/* delay 7 us */
ldr r6, =6000
wait_delay
/* set bit 0 of the SRC register at base + 0x1000 (DDRC reset control) */
ldr r11, [r0, #PM_INFO_MX7_SRC_V_OFFSET]
ldr r6, =0x1000
ldr r7, [r11, r6]
orr r7, r7, #0x1
str r7, [r11, r6]
11:
/* turn off ddr power (MISC_CTRL bit 29) */
ldr r11, [r0, #PM_INFO_MX7_ANATOP_V_OFFSET]
ldr r7, =(0x1 << 29)
str r7, [r11, #ANADIG_SNVS_MISC_CTRL_SET]
ldr r11, [r0, #PM_INFO_MX7_SRC_V_OFFSET]
ldr r6, =0x1000
ldr r7, [r11, r6]
orr r7, r7, #0x1
str r7, [r11, r6]
.endm
/*
 * ddr_exit_retention - power the DDR interface back up, re-program the
 * DDRC and DDR PHY from the register/value tables saved in pm_info, and
 * bring DDR out of self-refresh.
 * In:  r0 = pm_info base;
 *      r5 = 0  -> use virtual peripheral bases (normal wakeup path),
 *      r5 != 0 -> use physical bases (resume-from-DSM, MMU off).
 * Register roles inside the macro:
 *      r1 = anatop, r2 = SRC, r3 = DDRC, r4 = DDR PHY,
 *      r10 = CCM, r11 = IOMUXC GPR.
 * Clobbers: r1-r4, r6-r11, flags.
 */
.macro ddr_exit_retention
cmp r5, #0x0
ldreq r1, [r0, #PM_INFO_MX7_ANATOP_V_OFFSET]
ldrne r1, [r0, #PM_INFO_MX7_ANATOP_P_OFFSET]
ldreq r2, [r0, #PM_INFO_MX7_SRC_V_OFFSET]
ldrne r2, [r0, #PM_INFO_MX7_SRC_P_OFFSET]
ldreq r3, [r0, #PM_INFO_MX7_DDRC_V_OFFSET]
ldrne r3, [r0, #PM_INFO_MX7_DDRC_P_OFFSET]
ldreq r4, [r0, #PM_INFO_MX7_DDRC_PHY_V_OFFSET]
ldrne r4, [r0, #PM_INFO_MX7_DDRC_PHY_P_OFFSET]
ldreq r10, [r0, #PM_INFO_MX7_CCM_V_OFFSET]
ldrne r10, [r0, #PM_INFO_MX7_CCM_P_OFFSET]
ldreq r11, [r0, #PM_INFO_MX7_IOMUXC_GPR_V_OFFSET]
ldrne r11, [r0, #PM_INFO_MX7_IOMUXC_GPR_P_OFFSET]
/* turn on ddr power (clear MISC_CTRL bit 29) */
ldr r7, =(0x1 << 29)
str r7, [r1, #ANADIG_SNVS_MISC_CTRL_CLR]
ldr r6, =50
wait_delay
/*
 * clear ddr_phy reset: SRC register at base + 0x1000.  Bits 0/1 are
 * first both set, then bit 0 released.
 * NOTE(review): presumed DDRC/PHY reset bits -- verify against the
 * i.MX7D SRC chapter.
 */
ldr r6, =0x1000
ldr r7, [r2, r6]
orr r7, r7, #0x3
str r7, [r2, r6]
ldr r7, [r2, r6]
bic r7, r7, #0x1
str r7, [r2, r6]
13:
/* restore every saved DDRC register: table of (offset, value) pairs */
ldr r6, [r0, #PM_INFO_DDRC_REG_NUM_OFFSET]
ldr r7, =PM_INFO_DDRC_REG_OFFSET
add r7, r7, r0
14:
ldr r8, [r7], #0x4 /* r8 = register offset */
ldr r9, [r7], #0x4 /* r9 = saved value */
str r9, [r3, r8]
subs r6, r6, #0x1
bne 14b
/* request self-refresh (bit 5) and keep DFI initialization off */
ldr r7, =0x20
str r7, [r3, #DDRC_PWRCTL]
ldr r7, =0x0
str r7, [r3, #DDRC_DFIMISC]
/* do PHY, clear ddr_phy reset (release SRC bit 1) */
ldr r6, =0x1000
ldr r7, [r2, r6]
bic r7, r7, #0x2
str r7, [r2, r6]
/* silicon revision check: 0x11 == TO1.1 */
ldr r7, [r1, #ANADIG_DIGPROG]
and r7, r7, #0xff
cmp r7, #0x11
bne 12f
/*
 * TKT262940:
 * System hang when press RST for DDR PAD is
 * in retention mode, fixed on TO1.1
 */
ldr r7, [r11]
bic r7, r7, #(1 << 27)
str r7, [r11]
ldr r7, [r11]
bic r7, r7, #(1 << 29)
str r7, [r11]
12:
/* release DDR pads from retention (MISC_CTRL bit 30) */
ldr r7, =(0x1 << 30)
str r7, [r1, #ANADIG_SNVS_MISC_CTRL_SET]
/* need to delay ~5mS */
ldr r6, =0x100000
wait_delay
/* restore every saved DDR PHY register (same (offset, value) format) */
ldr r6, [r0, #PM_INFO_DDRC_PHY_REG_NUM_OFFSET]
ldr r7, =PM_INFO_DDRC_PHY_REG_OFFSET
add r7, r7, r0
15:
ldr r8, [r7], #0x4
ldr r9, [r7], #0x4
str r9, [r4, r8]
subs r6, r6, #0x1
bne 15b
/*
 * toggle CCM register at base + 0x4000 + 0x130 around an IOMUXC GPR
 * write (offset 0x20 set to 0x178).
 * NOTE(review): looks like a DRAM clock gate off/on around a PHY
 * control change -- confirm against the CCM/IOMUXC GPR documentation.
 */
ldr r7, =0x0
add r9, r10, #0x4000
str r7, [r9, #0x130]
ldr r7, =0x170
orr r7, r7, #0x8
str r7, [r11, #0x20]
ldr r7, =0x2
add r9, r10, #0x4000
str r7, [r9, #0x130]
ldr r7, =0xf
str r7, [r4, #DDRPHY_LP_CON0]
/* wait until self-refresh mode entered (operating mode == 0x3) */
16:
ldr r7, [r3, #DDRC_STAT]
and r7, r7, #0x3
cmp r7, #0x3
bne 16b
/* quasi-dynamic programming: open SWCTL, enable DFI init, close it */
ldr r7, =0x0
str r7, [r3, #DDRC_SWCTL]
ldr r7, =0x1
str r7, [r3, #DDRC_DFIMISC]
ldr r7, =0x1
str r7, [r3, #DDRC_SWCTL]
17:
/* wait for SWSTAT to acknowledge the programming is committed */
ldr r7, [r3, #DDRC_SWSTAT]
and r7, r7, #0x1
cmp r7, #0x1
bne 17b
18:
/* wait for STAT bit 5 (self-refresh type) before leaving SR */
ldr r7, [r3, #DDRC_STAT]
and r7, r7, #0x20
cmp r7, #0x20
bne 18b
/* let DDR out of self-refresh */
ldr r7, =0x0
str r7, [r3, #DDRC_PWRCTL]
19:
/* wait until both self-refresh status bits (STAT 5:4) clear */
ldr r7, [r3, #DDRC_STAT]
and r7, r7, #0x30
cmp r7, #0x0
bne 19b
20:
/* wait until operating mode is "normal" (0x1) */
ldr r7, [r3, #DDRC_STAT]
and r7, r7, #0x3
cmp r7, #0x1
bne 20b
/* enable port */
ldr r7, =0x1
str r7, [r3, #DDRC_PCTRL_0]
/* enable auto self-refresh */
ldr r7, [r3, #DDRC_PWRCTL]
orr r7, r7, #(1 << 0)
str r7, [r3, #DDRC_PWRCTL]
.endm
/*
 * imx7_suspend - low-level suspend entry, executed from IRAM.
 * In:  r0 = pm_info base, mapped at the same virtual address in both
 *      the kernel and the IRAM page tables (see comment below).
 *
 * Standby (GPC_PGC_C0 == 0): only put DDR into self-refresh and WFI.
 * Suspend: publish the physical resume address in SRC_GPR1/GPR2,
 * switch to the IRAM page table, put DDR into retention (or just
 * self-refresh if the FastMix PGC is not armed), optionally enter
 * LPSR, then WFI.  A normal wakeup falls through the WFI and restores
 * DDR here; a wakeup from DSM restarts the core at "resume" below
 * with the MMU off and r0 = physical pm_info address (from SRC_GPR2).
 */
ENTRY(imx7_suspend)
push {r4-r12}
/* make sure SNVS clk is enabled */
ldr r11, [r0, #PM_INFO_MX7_CCM_V_OFFSET]
add r11, r11, #0x4000
ldr r7, =0x3
str r7, [r11, #CCM_SNVS_LPCG]
/* check whether it is a standby mode */
ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFFSET]
ldr r7, [r11, #GPC_PGC_C0]
cmp r7, #0
beq ddr_only_self_refresh
/*
 * The value of r0 is mapped the same in origin table and IRAM table,
 * thus no need to care r0 here.
 */
ldr r1, [r0, #PM_INFO_PBASE_OFFSET]
ldr r2, [r0, #PM_INFO_RESUME_ADDR_OFFSET]
ldr r3, [r0, #PM_INFO_DDR_TYPE_OFFSET]
ldr r4, [r0, #PM_INFO_PM_INFO_SIZE_OFFSET]
/*
 * counting the resume address in iram
 * to set it in SRC register.
 * physical resume = pbase + pm_info size + ("resume" - "imx7_suspend")
 */
ldr r6, =imx7_suspend
ldr r7, =resume
sub r7, r7, r6
add r8, r1, r4
add r9, r8, r7
ldr r11, [r0, #PM_INFO_MX7_SRC_V_OFFSET]
/* store physical resume addr and pm_info address. */
str r9, [r11, #MX7_SRC_GPR1]
str r1, [r11, #MX7_SRC_GPR2]
disable_l1_dcache
store_ttbr1
/* FastMix PGC armed? if not, self-refresh is enough */
ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFFSET]
ldr r7, [r11, #GPC_PGC_FM]
cmp r7, #0
beq ddr_only_self_refresh
ddr_enter_retention
/* enter LPSR mode if resume addr is valid */
ldr r11, [r0, #PM_INFO_MX7_LPSR_V_OFFSET]
ldr r7, [r11]
cmp r7, #0x0
beq ddr_retention_enter_out
/* disable STOP mode before entering LPSR */
ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFFSET]
ldr r7, [r11]
bic r7, #0xf
str r7, [r11]
/* shut down vddsoc to enter lpsr mode */
ldr r11, [r0, #PM_INFO_MX7_SNVS_V_OFFSET]
ldr r7, [r11, #0x38]
orr r7, r7, #0x60
str r7, [r11, #0x38]
wait_shutdown:
/* power is being cut; loop on WFI until it actually drops */
wfi
nop
nop
nop
nop
b wait_shutdown
ddr_only_self_refresh:
ddrc_enter_self_refresh
b wfi
ddr_retention_enter_out:
/* write 0 at GIC base + 0x1000: disables the distributor
 * (NOTE(review): presumed GICD_CTLR -- verify base in pm-imx7.c) */
ldr r11, [r0, #PM_INFO_MX7_GIC_DIST_V_OFFSET]
ldr r7, =0x0
ldr r8, =0x1000
str r7, [r11, r8]
/* save the four GPC interrupt masks, then mask everything */
ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFFSET]
ldr r4, [r11, #MX7D_GPC_IMR1]
ldr r5, [r11, #MX7D_GPC_IMR2]
ldr r6, [r11, #MX7D_GPC_IMR3]
ldr r7, [r11, #MX7D_GPC_IMR4]
ldr r8, =0xffffffff
str r8, [r11, #MX7D_GPC_IMR1]
str r8, [r11, #MX7D_GPC_IMR2]
str r8, [r11, #MX7D_GPC_IMR3]
str r8, [r11, #MX7D_GPC_IMR4]
/*
 * enable the RBC bypass counter here
 * to hold off the interrupts. RBC counter
 * = 8 (240us). With this setting, the latency
 * from wakeup interrupt to ARM power up
 * is ~250uS.
 */
ldr r8, [r11, #0x14]
bic r8, r8, #(0x3f << 24)
orr r8, r8, #(0x8 << 24)
str r8, [r11, #0x14]
/* enable the counter. */
ldr r8, [r11, #0x14]
orr r8, r8, #(0x1 << 30)
str r8, [r11, #0x14]
/* unmask all the GPC interrupts. */
str r4, [r11, #MX7D_GPC_IMR1]
str r5, [r11, #MX7D_GPC_IMR2]
str r6, [r11, #MX7D_GPC_IMR3]
str r7, [r11, #MX7D_GPC_IMR4]
/*
 * now delay for a short while (3usec)
 * ARM is at 1GHz at this point
 * so a short loop should be enough.
 * this delay is required to ensure that
 * the RBC counter can start counting in
 * case an interrupt is already pending
 * or in case an interrupt arrives just
 * as ARM is about to assert DSM_request.
 */
ldr r7, =2000
rbc_loop:
subs r7, r7, #0x1
bne rbc_loop
wfi:
/* Zzz, enter stop mode */
wfi
nop
nop
nop
nop
/* fell through the WFI: r5 = 0 tells the exit macros to use
 * virtual addresses (MMU still on) */
mov r5, #0x0
ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFFSET]
ldr r7, [r11, #GPC_PGC_FM]
cmp r7, #0
beq wfi_ddr_self_refresh_out
ddr_exit_retention
b wfi_ddr_retention_out
wfi_ddr_self_refresh_out:
ddrc_exit_self_refresh
wfi_ddr_retention_out:
/* check whether it is a standby mode */
ldr r11, [r0, #PM_INFO_MX7_GPC_V_OFFSET]
ldr r7, [r11, #GPC_PGC_C0]
cmp r7, #0
beq standby_out
/* re-enable the GIC distributor (write 1 at base + 0x1000) */
ldr r11, [r0, #PM_INFO_MX7_GIC_DIST_V_OFFSET]
ldr r7, =0x1
ldr r8, =0x1000
str r7, [r11, r8]
restore_ttbr1
standby_out:
pop {r4-r12}
/* return to suspend finish */
mov pc, lr
resume:
/* DSM wakeup entry: MMU off, r0 = physical pm_info (from SRC_GPR2) */
/* invalidate L1 I-cache first */
mov r6, #0x0
mcr p15, 0, r6, c7, c5, 0
mcr p15, 0, r6, c7, c5, 6
/* enable the Icache and branch prediction */
mov r6, #0x1800
mcr p15, 0, r6, c1, c0, 0
isb
/* get physical resume address from pm_info. */
ldr lr, [r0, #PM_INFO_RESUME_ADDR_OFFSET]
/* clear core0's entry and parameter */
ldr r11, [r0, #PM_INFO_MX7_SRC_P_OFFSET]
mov r7, #0x0
str r7, [r11, #MX7_SRC_GPR1]
str r7, [r11, #MX7_SRC_GPR2]
/* r5 = 1: use physical peripheral bases in the exit macros */
mov r5, #0x1
ldr r11, [r0, #PM_INFO_MX7_GPC_P_OFFSET]
ldr r7, [r11, #GPC_PGC_FM]
cmp r7, #0
beq dsm_ddr_self_refresh_out
ddr_exit_retention
b dsm_ddr_retention_out
dsm_ddr_self_refresh_out:
ddrc_exit_self_refresh
dsm_ddr_retention_out:
mov pc, lr
ENDPROC(imx7_suspend)
/*
 * ca7_cpu_resume - resume entry trampoline: invalidate the L1 cache
 * (its contents are undefined after power-up) and jump to the generic
 * ARM cpu_resume to restore the saved kernel context.
 */
ENTRY(ca7_cpu_resume)
bl v7_invalidate_l1
b cpu_resume
ENDPROC(ca7_cpu_resume)