

Merge tag 'timers-v5.4' of https://git.linaro.org/people/daniel.lezcano/linux into timers/core

Pull clocksource/events updates from Daniel Lezcano:

 - Rewrite of the davinci timer, resulting in an immutable branch to be
   shared with the davinci platform-specific tree (Bartosz Golaszewski)

 - Cleanup and improvements of the tegra timer (Dmitry Osipenko)

 - Add the new NXP system counter (sys_ctr) timer driver (Bai Ping)

 - Increase the exynos_mct rating so it takes over initialization of the IP
   that the ARM architected timer depends on (Marek Szyprowski); see the
   rating sketch after this list

 - Replace the _BITUL() macro with BIT() in the arc timer (Masahiro Yamada)

 - Implement the delay timer on ixp4xx (Linus Walleij)

 - Add the SPDX license identifier to the meson timer (Neil Armstrong)
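
The exynos_mct change works through the clocksource/clockevent rating field:
the timekeeping core prefers the highest-rated registered device, so raising
the MCT ratings above those of the ARM architected timer makes the MCT win the
selection. The sketch below only illustrates that mechanism; the device name,
rating and clock rate are illustrative and are not taken from the merged
drivers.

#include <linux/clocksource.h>
#include <linux/init.h>

static u64 example_frc_read(struct clocksource *cs)
{
        return 0;       /* a real driver reads its free-running counter here */
}

static struct clocksource example_frc = {
        .name   = "example-frc",
        .rating = 450,          /* higher than the ARM arch timer's 400 */
        .read   = example_frc_read,
        .mask   = CLOCKSOURCE_MASK(32),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_frc_init(void)
{
        /* 24 MHz is an assumed input clock rate for this sketch */
        return clocksource_register_hz(&example_frc, 24000000);
}
device_initcall(example_frc_init);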
Thomas Gleixner 2019-06-26 16:45:49 +02:00
commit a57e8e1982
16 changed files with 1060 additions and 404 deletions


@ -0,0 +1,25 @@
NXP System Counter Module (sys_ctr)
The system counter (sys_ctr) is a programmable system counter which provides
a shared time base to Cortex-A15, A7, A53, A73, etc. It is intended for use in
applications where the counter is always powered on and supports multiple,
unrelated clocks. The compare frame inside can be used for timer purposes.
Required properties:
- compatible : should be "nxp,sysctr-timer"
- reg : Specifies the base physical address and size of the compare
frame and the counter control, read & compare.
- interrupts : should be the first compare frame's interrupt
- clocks : Specifies the counter clock.
- clock-names: Specifies the clock name of this module
Example:
system_counter: timer@306a0000 {
compatible = "nxp,sysctr-timer";
reg = <0x306a0000 0x20000>;/* system-counter-rd & compare */
clocks = <&clk_8m>;
clock-names = "per";
interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
};
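
The counter behind this binding is a 64-bit value exposed through two 32-bit
registers (CNTCV_LO/CNTCV_HI in the driver added later in this diff), so a
torn-free read needs the usual high/low/high retry loop. A standalone sketch
of that read, assuming only the register offsets used by the driver below:

#include <linux/io.h>
#include <linux/types.h>

#define SKETCH_CNTCV_LO 0x8     /* low 32 bits of the counter */
#define SKETCH_CNTCV_HI 0xc     /* high 32 bits of the counter */

/* Re-read the high word until it is stable around the low-word read. */
static u64 sketch_read_counter(void __iomem *base)
{
        u32 cnt_hi, tmp_hi, cnt_lo;

        do {
                cnt_hi = readl_relaxed(base + SKETCH_CNTCV_HI);
                cnt_lo = readl_relaxed(base + SKETCH_CNTCV_LO);
                tmp_hi = readl_relaxed(base + SKETCH_CNTCV_HI);
        } while (tmp_hi != cnt_hi);

        return ((u64)cnt_hi << 32) | cnt_lo;
}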


@ -4,6 +4,7 @@
#include <asm/barrier.h>
#include <asm/errno.h>
#include <asm/hwcap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/types.h>
@ -124,6 +125,15 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
isb();
}
static inline void arch_timer_set_evtstrm_feature(void)
{
elf_hwcap |= HWCAP_EVTSTRM;
}
static inline bool arch_timer_have_evtstrm_feature(void)
{
return elf_hwcap & HWCAP_EVTSTRM;
}
#endif
#endif


@ -20,6 +20,7 @@
#define __ASM_ARCH_TIMER_H
#include <asm/barrier.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>
#include <linux/bug.h>
@ -240,4 +241,16 @@ static inline int arch_timer_arch_init(void)
return 0;
}
static inline void arch_timer_set_evtstrm_feature(void)
{
cpu_set_named_feature(EVTSTRM);
#ifdef CONFIG_COMPAT
compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}
static inline bool arch_timer_have_evtstrm_feature(void)
{
return cpu_have_named_feature(EVTSTRM);
}
#endif


@ -43,6 +43,11 @@ config BCM_KONA_TIMER
help
Enables the support for the BCM Kona mobile timer driver.
config DAVINCI_TIMER
bool "Texas Instruments DaVinci timer driver" if COMPILE_TEST
help
Enables the support for the TI DaVinci timer driver.
config DIGICOLOR_TIMER
bool "Digicolor timer driver" if COMPILE_TEST
select CLKSRC_MMIO
@ -140,7 +145,7 @@ config TEGRA_TIMER
bool "Tegra timer driver" if COMPILE_TEST
select CLKSRC_MMIO
select TIMER_OF
depends on ARM || ARM64
depends on ARCH_TEGRA || COMPILE_TEST
help
Enables support for the Tegra driver.
@ -617,6 +622,13 @@ config CLKSRC_IMX_TPM
Enable this option to use IMX Timer/PWM Module (TPM) timer as
clocksource.
config TIMER_IMX_SYS_CTR
bool "i.MX system counter timer" if COMPILE_TEST
select TIMER_OF
help
Enable this option to use i.MX system counter timer as a
clockevent.
config CLKSRC_ST_LPC
bool "Low power clocksource found in the LPC" if COMPILE_TEST
select TIMER_OF if OF


@ -15,6 +15,7 @@ obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
obj-$(CONFIG_EM_TIMER_STI) += em_sti.o
obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DAVINCI_TIMER) += timer-davinci.o
obj-$(CONFIG_DIGICOLOR_TIMER) += timer-digicolor.o
obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
@ -36,7 +37,7 @@ obj-$(CONFIG_U300_TIMER) += timer-u300.o
obj-$(CONFIG_SUN4I_TIMER) += timer-sun4i.o
obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o
obj-$(CONFIG_MESON6_TIMER) += timer-meson6.o
obj-$(CONFIG_TEGRA_TIMER) += timer-tegra20.o
obj-$(CONFIG_TEGRA_TIMER) += timer-tegra.o
obj-$(CONFIG_VT8500_TIMER) += timer-vt8500.o
obj-$(CONFIG_NSPIRE_TIMER) += timer-zevio.o
obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o
@ -74,6 +75,7 @@ obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
obj-$(CONFIG_CLKSRC_TANGO_XTAL) += timer-tango-xtal.o
obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o
obj-$(CONFIG_CLKSRC_IMX_TPM) += timer-imx-tpm.o
obj-$(CONFIG_TIMER_IMX_SYS_CTR) += timer-imx-sysctr.o
obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o
obj-$(CONFIG_H8300_TMR8) += h8300_timer8.o
obj-$(CONFIG_H8300_TMR16) += h8300_timer16.o


@ -16,6 +16,7 @@
*/
#include <linux/interrupt.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clocksource.h>
@ -142,7 +143,7 @@ static u64 arc_read_rtc(struct clocksource *cs)
l = read_aux_reg(AUX_RTC_LOW);
h = read_aux_reg(AUX_RTC_HIGH);
status = read_aux_reg(AUX_RTC_CTRL);
} while (!(status & _BITUL(31)));
} while (!(status & BIT(31)));
return (((u64)h) << 32) | l;
}


@ -804,14 +804,7 @@ static void arch_timer_evtstrm_enable(int divider)
cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
| ARCH_TIMER_VIRT_EVT_EN;
arch_timer_set_cntkctl(cntkctl);
#ifdef CONFIG_ARM64
cpu_set_named_feature(EVTSTRM);
#else
elf_hwcap |= HWCAP_EVTSTRM;
#endif
#ifdef CONFIG_COMPAT
compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
arch_timer_set_evtstrm_feature();
cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}
@ -1040,11 +1033,7 @@ static int arch_timer_cpu_pm_notify(struct notifier_block *self,
} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
#ifdef CONFIG_ARM64
if (cpu_have_named_feature(EVTSTRM))
#else
if (elf_hwcap & HWCAP_EVTSTRM)
#endif
if (arch_timer_have_evtstrm_feature())
cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}
return NOTIFY_OK;


@ -209,7 +209,7 @@ static void exynos4_frc_resume(struct clocksource *cs)
static struct clocksource mct_frc = {
.name = "mct-frc",
.rating = 400,
.rating = 450, /* use value higher than ARM arch timer */
.read = exynos4_frc_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
@ -464,7 +464,7 @@ static int exynos4_mct_starting_cpu(unsigned int cpu)
evt->set_state_oneshot_stopped = set_state_shutdown;
evt->tick_resume = set_state_shutdown;
evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
evt->rating = 450;
evt->rating = 500; /* use value higher than ARM arch timer */
exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);


@ -0,0 +1,369 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* TI DaVinci clocksource driver
*
* Copyright (C) 2019 Texas Instruments
* Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
* (with tiny parts adopted from code by Kevin Hilman <khilman@baylibre.com>)
*/
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <clocksource/timer-davinci.h>
#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt "\n", __func__
#define DAVINCI_TIMER_REG_TIM12 0x10
#define DAVINCI_TIMER_REG_TIM34 0x14
#define DAVINCI_TIMER_REG_PRD12 0x18
#define DAVINCI_TIMER_REG_PRD34 0x1c
#define DAVINCI_TIMER_REG_TCR 0x20
#define DAVINCI_TIMER_REG_TGCR 0x24
#define DAVINCI_TIMER_TIMMODE_MASK GENMASK(3, 2)
#define DAVINCI_TIMER_RESET_MASK GENMASK(1, 0)
#define DAVINCI_TIMER_TIMMODE_32BIT_UNCHAINED BIT(2)
#define DAVINCI_TIMER_UNRESET GENMASK(1, 0)
#define DAVINCI_TIMER_ENAMODE_MASK GENMASK(1, 0)
#define DAVINCI_TIMER_ENAMODE_DISABLED 0x00
#define DAVINCI_TIMER_ENAMODE_ONESHOT BIT(0)
#define DAVINCI_TIMER_ENAMODE_PERIODIC BIT(1)
#define DAVINCI_TIMER_ENAMODE_SHIFT_TIM12 6
#define DAVINCI_TIMER_ENAMODE_SHIFT_TIM34 22
#define DAVINCI_TIMER_MIN_DELTA 0x01
#define DAVINCI_TIMER_MAX_DELTA 0xfffffffe
#define DAVINCI_TIMER_CLKSRC_BITS 32
#define DAVINCI_TIMER_TGCR_DEFAULT \
(DAVINCI_TIMER_TIMMODE_32BIT_UNCHAINED | DAVINCI_TIMER_UNRESET)
struct davinci_clockevent {
struct clock_event_device dev;
void __iomem *base;
unsigned int cmp_off;
};
/*
* This must be globally accessible by davinci_timer_read_sched_clock(), so
* let's keep it here.
*/
static struct {
struct clocksource dev;
void __iomem *base;
unsigned int tim_off;
} davinci_clocksource;
static struct davinci_clockevent *
to_davinci_clockevent(struct clock_event_device *clockevent)
{
return container_of(clockevent, struct davinci_clockevent, dev);
}
static unsigned int
davinci_clockevent_read(struct davinci_clockevent *clockevent,
unsigned int reg)
{
return readl_relaxed(clockevent->base + reg);
}
static void davinci_clockevent_write(struct davinci_clockevent *clockevent,
unsigned int reg, unsigned int val)
{
writel_relaxed(val, clockevent->base + reg);
}
static void davinci_tim12_shutdown(void __iomem *base)
{
unsigned int tcr;
tcr = DAVINCI_TIMER_ENAMODE_DISABLED <<
DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;
/*
* This function is only ever called if we're using both timer
* halves. In this case TIM34 runs in periodic mode and we must
* not modify it.
*/
tcr |= DAVINCI_TIMER_ENAMODE_PERIODIC <<
DAVINCI_TIMER_ENAMODE_SHIFT_TIM34;
writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
}
static void davinci_tim12_set_oneshot(void __iomem *base)
{
unsigned int tcr;
tcr = DAVINCI_TIMER_ENAMODE_ONESHOT <<
DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;
/* Same as above. */
tcr |= DAVINCI_TIMER_ENAMODE_PERIODIC <<
DAVINCI_TIMER_ENAMODE_SHIFT_TIM34;
writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
}
static int davinci_clockevent_shutdown(struct clock_event_device *dev)
{
struct davinci_clockevent *clockevent;
clockevent = to_davinci_clockevent(dev);
davinci_tim12_shutdown(clockevent->base);
return 0;
}
static int davinci_clockevent_set_oneshot(struct clock_event_device *dev)
{
struct davinci_clockevent *clockevent = to_davinci_clockevent(dev);
davinci_clockevent_write(clockevent, DAVINCI_TIMER_REG_TIM12, 0x0);
davinci_tim12_set_oneshot(clockevent->base);
return 0;
}
static int
davinci_clockevent_set_next_event_std(unsigned long cycles,
struct clock_event_device *dev)
{
struct davinci_clockevent *clockevent = to_davinci_clockevent(dev);
davinci_clockevent_shutdown(dev);
davinci_clockevent_write(clockevent, DAVINCI_TIMER_REG_TIM12, 0x0);
davinci_clockevent_write(clockevent, DAVINCI_TIMER_REG_PRD12, cycles);
davinci_clockevent_set_oneshot(dev);
return 0;
}
static int
davinci_clockevent_set_next_event_cmp(unsigned long cycles,
struct clock_event_device *dev)
{
struct davinci_clockevent *clockevent = to_davinci_clockevent(dev);
unsigned int curr_time;
curr_time = davinci_clockevent_read(clockevent,
DAVINCI_TIMER_REG_TIM12);
davinci_clockevent_write(clockevent,
clockevent->cmp_off, curr_time + cycles);
return 0;
}
static irqreturn_t davinci_timer_irq_timer(int irq, void *data)
{
struct davinci_clockevent *clockevent = data;
if (!clockevent_state_oneshot(&clockevent->dev))
davinci_tim12_shutdown(clockevent->base);
clockevent->dev.event_handler(&clockevent->dev);
return IRQ_HANDLED;
}
static u64 notrace davinci_timer_read_sched_clock(void)
{
return readl_relaxed(davinci_clocksource.base +
davinci_clocksource.tim_off);
}
static u64 davinci_clocksource_read(struct clocksource *dev)
{
return davinci_timer_read_sched_clock();
}
/*
* Standard use-case: we're using tim12 for clockevent and tim34 for
* clocksource. The default is making the former run in oneshot mode
* and the latter in periodic mode.
*/
static void davinci_clocksource_init_tim34(void __iomem *base)
{
int tcr;
tcr = DAVINCI_TIMER_ENAMODE_PERIODIC <<
DAVINCI_TIMER_ENAMODE_SHIFT_TIM34;
tcr |= DAVINCI_TIMER_ENAMODE_ONESHOT <<
DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;
writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM34);
writel_relaxed(UINT_MAX, base + DAVINCI_TIMER_REG_PRD34);
writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
}
/*
* Special use-case on da830: the DSP may use tim34. We're using tim12 for
* both clocksource and clockevent. We set tim12 to periodic and don't touch
* tim34.
*/
static void davinci_clocksource_init_tim12(void __iomem *base)
{
unsigned int tcr;
tcr = DAVINCI_TIMER_ENAMODE_PERIODIC <<
DAVINCI_TIMER_ENAMODE_SHIFT_TIM12;
writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM12);
writel_relaxed(UINT_MAX, base + DAVINCI_TIMER_REG_PRD12);
writel_relaxed(tcr, base + DAVINCI_TIMER_REG_TCR);
}
static void davinci_timer_init(void __iomem *base)
{
/* Set clock to internal mode and disable it. */
writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TCR);
/*
* Reset both 32-bit timers, set no prescaler for timer 34, set the
* timer to dual 32-bit unchained mode, unreset both 32-bit timers.
*/
writel_relaxed(DAVINCI_TIMER_TGCR_DEFAULT,
base + DAVINCI_TIMER_REG_TGCR);
/* Init both counters to zero. */
writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM12);
writel_relaxed(0x0, base + DAVINCI_TIMER_REG_TIM34);
}
int __init davinci_timer_register(struct clk *clk,
const struct davinci_timer_cfg *timer_cfg)
{
struct davinci_clockevent *clockevent;
unsigned int tick_rate;
void __iomem *base;
int rv;
rv = clk_prepare_enable(clk);
if (rv) {
pr_err("Unable to prepare and enable the timer clock");
return rv;
}
if (!request_mem_region(timer_cfg->reg.start,
resource_size(&timer_cfg->reg),
"davinci-timer")) {
pr_err("Unable to request memory region");
return -EBUSY;
}
base = ioremap(timer_cfg->reg.start, resource_size(&timer_cfg->reg));
if (!base) {
pr_err("Unable to map the register range");
return -ENOMEM;
}
davinci_timer_init(base);
tick_rate = clk_get_rate(clk);
clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL | __GFP_NOFAIL);
if (!clockevent) {
pr_err("Error allocating memory for clockevent data");
return -ENOMEM;
}
clockevent->dev.name = "tim12";
clockevent->dev.features = CLOCK_EVT_FEAT_ONESHOT;
clockevent->dev.cpumask = cpumask_of(0);
clockevent->base = base;
if (timer_cfg->cmp_off) {
clockevent->cmp_off = timer_cfg->cmp_off;
clockevent->dev.set_next_event =
davinci_clockevent_set_next_event_cmp;
} else {
clockevent->dev.set_next_event =
davinci_clockevent_set_next_event_std;
clockevent->dev.set_state_oneshot =
davinci_clockevent_set_oneshot;
clockevent->dev.set_state_shutdown =
davinci_clockevent_shutdown;
}
rv = request_irq(timer_cfg->irq[DAVINCI_TIMER_CLOCKEVENT_IRQ].start,
davinci_timer_irq_timer, IRQF_TIMER,
"clockevent/tim12", clockevent);
if (rv) {
pr_err("Unable to request the clockevent interrupt");
return rv;
}
clockevents_config_and_register(&clockevent->dev, tick_rate,
DAVINCI_TIMER_MIN_DELTA,
DAVINCI_TIMER_MAX_DELTA);
davinci_clocksource.dev.rating = 300;
davinci_clocksource.dev.read = davinci_clocksource_read;
davinci_clocksource.dev.mask =
CLOCKSOURCE_MASK(DAVINCI_TIMER_CLKSRC_BITS);
davinci_clocksource.dev.flags = CLOCK_SOURCE_IS_CONTINUOUS;
davinci_clocksource.base = base;
if (timer_cfg->cmp_off) {
davinci_clocksource.dev.name = "tim12";
davinci_clocksource.tim_off = DAVINCI_TIMER_REG_TIM12;
davinci_clocksource_init_tim12(base);
} else {
davinci_clocksource.dev.name = "tim34";
davinci_clocksource.tim_off = DAVINCI_TIMER_REG_TIM34;
davinci_clocksource_init_tim34(base);
}
rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate);
if (rv) {
pr_err("Unable to register clocksource");
return rv;
}
sched_clock_register(davinci_timer_read_sched_clock,
DAVINCI_TIMER_CLKSRC_BITS, tick_rate);
return 0;
}
static int __init of_davinci_timer_register(struct device_node *np)
{
struct davinci_timer_cfg timer_cfg = { };
struct clk *clk;
int rv;
rv = of_address_to_resource(np, 0, &timer_cfg.reg);
if (rv) {
pr_err("Unable to get the register range for timer");
return rv;
}
rv = of_irq_to_resource_table(np, timer_cfg.irq,
DAVINCI_TIMER_NUM_IRQS);
if (rv != DAVINCI_TIMER_NUM_IRQS) {
pr_err("Unable to get the interrupts for timer");
return rv;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("Unable to get the timer clock");
return PTR_ERR(clk);
}
rv = davinci_timer_register(clk, &timer_cfg);
if (rv)
clk_put(clk);
return rv;
}
TIMER_OF_DECLARE(davinci_timer, "ti,da830-timer", of_davinci_timer_register);


@ -0,0 +1,145 @@
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2017-2019 NXP
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include "timer-of.h"
#define CMP_OFFSET 0x10000
#define CNTCV_LO 0x8
#define CNTCV_HI 0xc
#define CMPCV_LO (CMP_OFFSET + 0x20)
#define CMPCV_HI (CMP_OFFSET + 0x24)
#define CMPCR (CMP_OFFSET + 0x2c)
#define SYS_CTR_EN 0x1
#define SYS_CTR_IRQ_MASK 0x2
static void __iomem *sys_ctr_base;
static u32 cmpcr;
static void sysctr_timer_enable(bool enable)
{
writel(enable ? cmpcr | SYS_CTR_EN : cmpcr, sys_ctr_base + CMPCR);
}
static void sysctr_irq_acknowledge(void)
{
/*
* Clearing the enable bit (EN = 0) will clear the status bit
* (ISTAT = 0); the interrupt signal will then be negated
* (acknowledged).
*/
sysctr_timer_enable(false);
}
static inline u64 sysctr_read_counter(void)
{
u32 cnt_hi, tmp_hi, cnt_lo;
do {
cnt_hi = readl_relaxed(sys_ctr_base + CNTCV_HI);
cnt_lo = readl_relaxed(sys_ctr_base + CNTCV_LO);
tmp_hi = readl_relaxed(sys_ctr_base + CNTCV_HI);
} while (tmp_hi != cnt_hi);
return ((u64) cnt_hi << 32) | cnt_lo;
}
static int sysctr_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
u32 cmp_hi, cmp_lo;
u64 next;
sysctr_timer_enable(false);
next = sysctr_read_counter();
next += delta;
cmp_hi = (next >> 32) & 0x00fffff;
cmp_lo = next & 0xffffffff;
writel_relaxed(cmp_hi, sys_ctr_base + CMPCV_HI);
writel_relaxed(cmp_lo, sys_ctr_base + CMPCV_LO);
sysctr_timer_enable(true);
return 0;
}
static int sysctr_set_state_oneshot(struct clock_event_device *evt)
{
return 0;
}
static int sysctr_set_state_shutdown(struct clock_event_device *evt)
{
sysctr_timer_enable(false);
return 0;
}
static irqreturn_t sysctr_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
sysctr_irq_acknowledge();
evt->event_handler(evt);
return IRQ_HANDLED;
}
static struct timer_of to_sysctr = {
.flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE,
.clkevt = {
.name = "i.MX system counter timer",
.features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_DYNIRQ,
.set_state_oneshot = sysctr_set_state_oneshot,
.set_next_event = sysctr_set_next_event,
.set_state_shutdown = sysctr_set_state_shutdown,
.rating = 200,
},
.of_irq = {
.handler = sysctr_timer_interrupt,
.flags = IRQF_TIMER | IRQF_IRQPOLL,
},
.of_clk = {
.name = "per",
},
};
static void __init sysctr_clockevent_init(void)
{
to_sysctr.clkevt.cpumask = cpumask_of(0);
clockevents_config_and_register(&to_sysctr.clkevt,
timer_of_rate(&to_sysctr),
0xff, 0x7fffffff);
}
static int __init sysctr_timer_init(struct device_node *np)
{
int ret = 0;
ret = timer_of_init(np, &to_sysctr);
if (ret)
return ret;
sys_ctr_base = timer_of_base(&to_sysctr);
cmpcr = readl(sys_ctr_base + CMPCR);
cmpcr &= ~SYS_CTR_EN;
sysctr_clockevent_init();
return 0;
}
TIMER_OF_DECLARE(sysctr_timer, "nxp,sysctr-timer", sysctr_timer_init);


@ -75,14 +75,19 @@ to_ixp4xx_timer(struct clock_event_device *evt)
return container_of(evt, struct ixp4xx_timer, clkevt);
}
static u64 notrace ixp4xx_read_sched_clock(void)
static unsigned long ixp4xx_read_timer(void)
{
return __raw_readl(local_ixp4xx_timer->base + IXP4XX_OSTS_OFFSET);
}
static u64 notrace ixp4xx_read_sched_clock(void)
{
return ixp4xx_read_timer();
}
static u64 ixp4xx_clocksource_read(struct clocksource *c)
{
return __raw_readl(local_ixp4xx_timer->base + IXP4XX_OSTS_OFFSET);
return ixp4xx_read_timer();
}
static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id)
@ -224,6 +229,13 @@ static __init int ixp4xx_timer_register(void __iomem *base,
sched_clock_register(ixp4xx_read_sched_clock, 32, timer_freq);
#ifdef CONFIG_ARM
/* Also use this timer for delays */
tmr->delay_timer.read_current_timer = ixp4xx_read_timer;
tmr->delay_timer.freq = timer_freq;
register_current_timer_delay(&tmr->delay_timer);
#endif
return 0;
}


@ -1,13 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Amlogic Meson6 SoCs timer handling.
*
* Copyright (C) 2014 Carlo Caione <carlo@caione.org>
*
* Based on code from Amlogic, Inc
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/bitfield.h>


@ -0,0 +1,416 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2010 Google, Inc.
*
* Author:
* Colin Cross <ccross@google.com>
*/
#define pr_fmt(fmt) "tegra-timer: " fmt
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/sched_clock.h>
#include <linux/time.h>
#include "timer-of.h"
#define RTC_SECONDS 0x08
#define RTC_SHADOW_SECONDS 0x0c
#define RTC_MILLISECONDS 0x10
#define TIMERUS_CNTR_1US 0x10
#define TIMERUS_USEC_CFG 0x14
#define TIMERUS_CNTR_FREEZE 0x4c
#define TIMER_PTV 0x0
#define TIMER_PTV_EN BIT(31)
#define TIMER_PTV_PER BIT(30)
#define TIMER_PCR 0x4
#define TIMER_PCR_INTR_CLR BIT(30)
#define TIMER1_BASE 0x00
#define TIMER2_BASE 0x08
#define TIMER3_BASE 0x50
#define TIMER4_BASE 0x58
#define TIMER10_BASE 0x90
#define TIMER1_IRQ_IDX 0
#define TIMER10_IRQ_IDX 10
#define TIMER_1MHz 1000000
static u32 usec_config;
static void __iomem *timer_reg_base;
static int tegra_timer_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
void __iomem *reg_base = timer_of_base(to_timer_of(evt));
/*
* Tegra's timer uses n+1 scheme for the counter, i.e. timer will
* fire after one tick if 0 is loaded.
*
* The minimum and maximum numbers of oneshot ticks are defined
* by clockevents_config_and_register(1, 0x1fffffff + 1) invocation
* below in the code. Hence the cycles (ticks) can't be outside of
* a range supportable by hardware.
*/
writel_relaxed(TIMER_PTV_EN | (cycles - 1), reg_base + TIMER_PTV);
return 0;
}
static int tegra_timer_shutdown(struct clock_event_device *evt)
{
void __iomem *reg_base = timer_of_base(to_timer_of(evt));
writel_relaxed(0, reg_base + TIMER_PTV);
return 0;
}
static int tegra_timer_set_periodic(struct clock_event_device *evt)
{
void __iomem *reg_base = timer_of_base(to_timer_of(evt));
unsigned long period = timer_of_period(to_timer_of(evt));
writel_relaxed(TIMER_PTV_EN | TIMER_PTV_PER | (period - 1),
reg_base + TIMER_PTV);
return 0;
}
static irqreturn_t tegra_timer_isr(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
void __iomem *reg_base = timer_of_base(to_timer_of(evt));
writel_relaxed(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static void tegra_timer_suspend(struct clock_event_device *evt)
{
void __iomem *reg_base = timer_of_base(to_timer_of(evt));
writel_relaxed(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
}
static void tegra_timer_resume(struct clock_event_device *evt)
{
writel_relaxed(usec_config, timer_reg_base + TIMERUS_USEC_CFG);
}
static DEFINE_PER_CPU(struct timer_of, tegra_to) = {
.flags = TIMER_OF_CLOCK | TIMER_OF_BASE,
.clkevt = {
.name = "tegra_timer",
.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
.set_next_event = tegra_timer_set_next_event,
.set_state_shutdown = tegra_timer_shutdown,
.set_state_periodic = tegra_timer_set_periodic,
.set_state_oneshot = tegra_timer_shutdown,
.tick_resume = tegra_timer_shutdown,
.suspend = tegra_timer_suspend,
.resume = tegra_timer_resume,
},
};
static int tegra_timer_setup(unsigned int cpu)
{
struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
writel_relaxed(0, timer_of_base(to) + TIMER_PTV);
writel_relaxed(TIMER_PCR_INTR_CLR, timer_of_base(to) + TIMER_PCR);
irq_force_affinity(to->clkevt.irq, cpumask_of(cpu));
enable_irq(to->clkevt.irq);
/*
* Tegra's timer uses n+1 scheme for the counter, i.e. timer will
* fire after one tick if 0 is loaded and thus minimum number of
* ticks is 1. In result both of the clocksource's tick limits are
* higher than a minimum and maximum that hardware register can
* take by 1, this is then taken into account by set_next_event
* callback.
*/
clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
1, /* min */
0x1fffffff + 1); /* max 29 bits + 1 */
return 0;
}
static int tegra_timer_stop(unsigned int cpu)
{
struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
to->clkevt.set_state_shutdown(&to->clkevt);
disable_irq_nosync(to->clkevt.irq);
return 0;
}
static u64 notrace tegra_read_sched_clock(void)
{
return readl_relaxed(timer_reg_base + TIMERUS_CNTR_1US);
}
#ifdef CONFIG_ARM
static unsigned long tegra_delay_timer_read_counter_long(void)
{
return readl_relaxed(timer_reg_base + TIMERUS_CNTR_1US);
}
static struct delay_timer tegra_delay_timer = {
.read_current_timer = tegra_delay_timer_read_counter_long,
.freq = TIMER_1MHz,
};
#endif
static struct timer_of suspend_rtc_to = {
.flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
};
/*
* tegra_rtc_read - Reads the Tegra RTC registers
* Care must be taken that this function is not called while the
* tegra_rtc driver could be executing to avoid race conditions
* on the RTC shadow register
*/
static u64 tegra_rtc_read_ms(struct clocksource *cs)
{
void __iomem *reg_base = timer_of_base(&suspend_rtc_to);
u32 ms = readl_relaxed(reg_base + RTC_MILLISECONDS);
u32 s = readl_relaxed(reg_base + RTC_SHADOW_SECONDS);
return (u64)s * MSEC_PER_SEC + ms;
}
static struct clocksource suspend_rtc_clocksource = {
.name = "tegra_suspend_timer",
.rating = 200,
.read = tegra_rtc_read_ms,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};
static inline unsigned int tegra_base_for_cpu(int cpu, bool tegra20)
{
if (tegra20) {
switch (cpu) {
case 0:
return TIMER1_BASE;
case 1:
return TIMER2_BASE;
case 2:
return TIMER3_BASE;
default:
return TIMER4_BASE;
}
}
return TIMER10_BASE + cpu * 8;
}
static inline unsigned int tegra_irq_idx_for_cpu(int cpu, bool tegra20)
{
if (tegra20)
return TIMER1_IRQ_IDX + cpu;
return TIMER10_IRQ_IDX + cpu;
}
static inline unsigned long tegra_rate_for_timer(struct timer_of *to,
bool tegra20)
{
/*
* TIMER1-9 are fixed to 1MHz, TIMER10-13 are running off the
* parent clock.
*/
if (tegra20)
return TIMER_1MHz;
return timer_of_rate(to);
}
static int __init tegra_init_timer(struct device_node *np, bool tegra20,
int rating)
{
struct timer_of *to;
int cpu, ret;
to = this_cpu_ptr(&tegra_to);
ret = timer_of_init(np, to);
if (ret)
goto out;
timer_reg_base = timer_of_base(to);
/*
* Configure microsecond timers to have 1MHz clock
* Config register is 0xqqww, where qq is "dividend", ww is "divisor"
* Uses n+1 scheme
*/
switch (timer_of_rate(to)) {
case 12000000:
usec_config = 0x000b; /* (11+1)/(0+1) */
break;
case 12800000:
usec_config = 0x043f; /* (63+1)/(4+1) */
break;
case 13000000:
usec_config = 0x000c; /* (12+1)/(0+1) */
break;
case 16800000:
usec_config = 0x0453; /* (83+1)/(4+1) */
break;
case 19200000:
usec_config = 0x045f; /* (95+1)/(4+1) */
break;
case 26000000:
usec_config = 0x0019; /* (25+1)/(0+1) */
break;
case 38400000:
usec_config = 0x04bf; /* (191+1)/(4+1) */
break;
case 48000000:
usec_config = 0x002f; /* (47+1)/(0+1) */
break;
default:
ret = -EINVAL;
goto out;
}
writel_relaxed(usec_config, timer_reg_base + TIMERUS_USEC_CFG);
for_each_possible_cpu(cpu) {
struct timer_of *cpu_to = per_cpu_ptr(&tegra_to, cpu);
unsigned long flags = IRQF_TIMER | IRQF_NOBALANCING;
unsigned long rate = tegra_rate_for_timer(to, tegra20);
unsigned int base = tegra_base_for_cpu(cpu, tegra20);
unsigned int idx = tegra_irq_idx_for_cpu(cpu, tegra20);
unsigned int irq = irq_of_parse_and_map(np, idx);
if (!irq) {
pr_err("failed to map irq for cpu%d\n", cpu);
ret = -EINVAL;
goto out_irq;
}
cpu_to->clkevt.irq = irq;
cpu_to->clkevt.rating = rating;
cpu_to->clkevt.cpumask = cpumask_of(cpu);
cpu_to->of_base.base = timer_reg_base + base;
cpu_to->of_clk.period = rate / HZ;
cpu_to->of_clk.rate = rate;
irq_set_status_flags(cpu_to->clkevt.irq, IRQ_NOAUTOEN);
ret = request_irq(cpu_to->clkevt.irq, tegra_timer_isr, flags,
cpu_to->clkevt.name, &cpu_to->clkevt);
if (ret) {
pr_err("failed to set up irq for cpu%d: %d\n",
cpu, ret);
irq_dispose_mapping(cpu_to->clkevt.irq);
cpu_to->clkevt.irq = 0;
goto out_irq;
}
}
sched_clock_register(tegra_read_sched_clock, 32, TIMER_1MHz);
ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
"timer_us", TIMER_1MHz, 300, 32,
clocksource_mmio_readl_up);
if (ret)
pr_err("failed to register clocksource: %d\n", ret);
#ifdef CONFIG_ARM
register_current_timer_delay(&tegra_delay_timer);
#endif
ret = cpuhp_setup_state(CPUHP_AP_TEGRA_TIMER_STARTING,
"AP_TEGRA_TIMER_STARTING", tegra_timer_setup,
tegra_timer_stop);
if (ret)
pr_err("failed to set up cpu hp state: %d\n", ret);
return ret;
out_irq:
for_each_possible_cpu(cpu) {
struct timer_of *cpu_to;
cpu_to = per_cpu_ptr(&tegra_to, cpu);
if (cpu_to->clkevt.irq) {
free_irq(cpu_to->clkevt.irq, &cpu_to->clkevt);
irq_dispose_mapping(cpu_to->clkevt.irq);
}
}
to->of_base.base = timer_reg_base;
out:
timer_of_cleanup(to);
return ret;
}
static int __init tegra210_init_timer(struct device_node *np)
{
/*
* Arch-timer can't survive across power cycle of CPU core and
* after CPUPORESET signal due to a system design shortcoming,
* hence tegra-timer is more preferable on Tegra210.
*/
return tegra_init_timer(np, false, 460);
}
TIMER_OF_DECLARE(tegra210_timer, "nvidia,tegra210-timer", tegra210_init_timer);
static int __init tegra20_init_timer(struct device_node *np)
{
int rating;
/*
* Tegra20 and Tegra30 have Cortex A9 CPU that has a TWD timer,
* that timer runs off the CPU clock and hence is subjected to
* a jitter caused by DVFS clock rate changes. Tegra-timer is
* more preferable for older Tegra's, while later SoC generations
* have arch-timer as a main per-CPU timer and it is not affected
* by DVFS changes.
*/
if (of_machine_is_compatible("nvidia,tegra20") ||
of_machine_is_compatible("nvidia,tegra30"))
rating = 460;
else
rating = 330;
return tegra_init_timer(np, true, rating);
}
TIMER_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer);
static int __init tegra20_init_rtc(struct device_node *np)
{
int ret;
ret = timer_of_init(np, &suspend_rtc_to);
if (ret)
return ret;
return clocksource_register_hz(&suspend_rtc_clocksource, 1000);
}
TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);


@ -1,379 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2010 Google, Inc.
*
* Author:
* Colin Cross <ccross@google.com>
*/
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/sched_clock.h>
#include <linux/time.h>
#include "timer-of.h"
#ifdef CONFIG_ARM
#include <asm/mach/time.h>
#endif
#define RTC_SECONDS 0x08
#define RTC_SHADOW_SECONDS 0x0c
#define RTC_MILLISECONDS 0x10
#define TIMERUS_CNTR_1US 0x10
#define TIMERUS_USEC_CFG 0x14
#define TIMERUS_CNTR_FREEZE 0x4c
#define TIMER_PTV 0x0
#define TIMER_PTV_EN BIT(31)
#define TIMER_PTV_PER BIT(30)
#define TIMER_PCR 0x4
#define TIMER_PCR_INTR_CLR BIT(30)
#ifdef CONFIG_ARM
#define TIMER_CPU0 0x50 /* TIMER3 */
#else
#define TIMER_CPU0 0x90 /* TIMER10 */
#define TIMER10_IRQ_IDX 10
#define IRQ_IDX_FOR_CPU(cpu) (TIMER10_IRQ_IDX + cpu)
#endif
#define TIMER_BASE_FOR_CPU(cpu) (TIMER_CPU0 + (cpu) * 8)
static u32 usec_config;
static void __iomem *timer_reg_base;
#ifdef CONFIG_ARM
static struct delay_timer tegra_delay_timer;
#endif
static int tegra_timer_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
void __iomem *reg_base = timer_of_base(to_timer_of(evt));
writel(TIMER_PTV_EN |
((cycles > 1) ? (cycles - 1) : 0), /* n+1 scheme */
reg_base + TIMER_PTV);
return 0;
}
static int tegra_timer_shutdown(struct clock_event_device *evt)
{
void __iomem *reg_base = timer_of_base(to_timer_of(evt));
writel(0, reg_base + TIMER_PTV);
return 0;
}
static int tegra_timer_set_periodic(struct clock_event_device *evt)
{
void __iomem *reg_base = timer_of_base(to_timer_of(evt));
writel(TIMER_PTV_EN | TIMER_PTV_PER |
((timer_of_rate(to_timer_of(evt)) / HZ) - 1),
reg_base + TIMER_PTV);
return 0;
}
static irqreturn_t tegra_timer_isr(int irq, void *dev_id)
{
struct clock_event_device *evt = (struct clock_event_device *)dev_id;
void __iomem *reg_base = timer_of_base(to_timer_of(evt));
writel(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static void tegra_timer_suspend(struct clock_event_device *evt)
{
void __iomem *reg_base = timer_of_base(to_timer_of(evt));
writel(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
}
static void tegra_timer_resume(struct clock_event_device *evt)
{
writel(usec_config, timer_reg_base + TIMERUS_USEC_CFG);
}
#ifdef CONFIG_ARM64
static DEFINE_PER_CPU(struct timer_of, tegra_to) = {
.flags = TIMER_OF_CLOCK | TIMER_OF_BASE,
.clkevt = {
.name = "tegra_timer",
.rating = 460,
.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
.set_next_event = tegra_timer_set_next_event,
.set_state_shutdown = tegra_timer_shutdown,
.set_state_periodic = tegra_timer_set_periodic,
.set_state_oneshot = tegra_timer_shutdown,
.tick_resume = tegra_timer_shutdown,
.suspend = tegra_timer_suspend,
.resume = tegra_timer_resume,
},
};
static int tegra_timer_setup(unsigned int cpu)
{
struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
irq_force_affinity(to->clkevt.irq, cpumask_of(cpu));
enable_irq(to->clkevt.irq);
clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
1, /* min */
0x1fffffff); /* 29 bits */
return 0;
}
static int tegra_timer_stop(unsigned int cpu)
{
struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
to->clkevt.set_state_shutdown(&to->clkevt);
disable_irq_nosync(to->clkevt.irq);
return 0;
}
#else /* CONFIG_ARM */
static struct timer_of tegra_to = {
.flags = TIMER_OF_CLOCK | TIMER_OF_BASE | TIMER_OF_IRQ,
.clkevt = {
.name = "tegra_timer",
.rating = 300,
.features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_DYNIRQ,
.set_next_event = tegra_timer_set_next_event,
.set_state_shutdown = tegra_timer_shutdown,
.set_state_periodic = tegra_timer_set_periodic,
.set_state_oneshot = tegra_timer_shutdown,
.tick_resume = tegra_timer_shutdown,
.suspend = tegra_timer_suspend,
.resume = tegra_timer_resume,
.cpumask = cpu_possible_mask,
},
.of_irq = {
.index = 2,
.flags = IRQF_TIMER | IRQF_TRIGGER_HIGH,
.handler = tegra_timer_isr,
},
};
static u64 notrace tegra_read_sched_clock(void)
{
return readl(timer_reg_base + TIMERUS_CNTR_1US);
}
static unsigned long tegra_delay_timer_read_counter_long(void)
{
return readl(timer_reg_base + TIMERUS_CNTR_1US);
}
static struct timer_of suspend_rtc_to = {
.flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
};
/*
* tegra_rtc_read - Reads the Tegra RTC registers
* Care must be taken that this function is not called while the
* tegra_rtc driver could be executing to avoid race conditions
* on the RTC shadow register
*/
static u64 tegra_rtc_read_ms(struct clocksource *cs)
{
u32 ms = readl(timer_of_base(&suspend_rtc_to) + RTC_MILLISECONDS);
u32 s = readl(timer_of_base(&suspend_rtc_to) + RTC_SHADOW_SECONDS);
return (u64)s * MSEC_PER_SEC + ms;
}
static struct clocksource suspend_rtc_clocksource = {
.name = "tegra_suspend_timer",
.rating = 200,
.read = tegra_rtc_read_ms,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};
#endif
static int tegra_timer_common_init(struct device_node *np, struct timer_of *to)
{
int ret = 0;
ret = timer_of_init(np, to);
if (ret < 0)
goto out;
timer_reg_base = timer_of_base(to);
/*
* Configure microsecond timers to have 1MHz clock
* Config register is 0xqqww, where qq is "dividend", ww is "divisor"
* Uses n+1 scheme
*/
switch (timer_of_rate(to)) {
case 12000000:
usec_config = 0x000b; /* (11+1)/(0+1) */
break;
case 12800000:
usec_config = 0x043f; /* (63+1)/(4+1) */
break;
case 13000000:
usec_config = 0x000c; /* (12+1)/(0+1) */
break;
case 16800000:
usec_config = 0x0453; /* (83+1)/(4+1) */
break;
case 19200000:
usec_config = 0x045f; /* (95+1)/(4+1) */
break;
case 26000000:
usec_config = 0x0019; /* (25+1)/(0+1) */
break;
case 38400000:
usec_config = 0x04bf; /* (191+1)/(4+1) */
break;
case 48000000:
usec_config = 0x002f; /* (47+1)/(0+1) */
break;
default:
ret = -EINVAL;
goto out;
}
writel(usec_config, timer_of_base(to) + TIMERUS_USEC_CFG);
out:
return ret;
}
#ifdef CONFIG_ARM64
static int __init tegra_init_timer(struct device_node *np)
{
int cpu, ret = 0;
struct timer_of *to;
to = this_cpu_ptr(&tegra_to);
ret = tegra_timer_common_init(np, to);
if (ret < 0)
goto out;
for_each_possible_cpu(cpu) {
struct timer_of *cpu_to;
cpu_to = per_cpu_ptr(&tegra_to, cpu);
cpu_to->of_base.base = timer_reg_base + TIMER_BASE_FOR_CPU(cpu);
cpu_to->of_clk.rate = timer_of_rate(to);
cpu_to->clkevt.cpumask = cpumask_of(cpu);
cpu_to->clkevt.irq =
irq_of_parse_and_map(np, IRQ_IDX_FOR_CPU(cpu));
if (!cpu_to->clkevt.irq) {
pr_err("%s: can't map IRQ for CPU%d\n",
__func__, cpu);
ret = -EINVAL;
goto out;
}
irq_set_status_flags(cpu_to->clkevt.irq, IRQ_NOAUTOEN);
ret = request_irq(cpu_to->clkevt.irq, tegra_timer_isr,
IRQF_TIMER | IRQF_NOBALANCING,
cpu_to->clkevt.name, &cpu_to->clkevt);
if (ret) {
pr_err("%s: cannot setup irq %d for CPU%d\n",
__func__, cpu_to->clkevt.irq, cpu);
ret = -EINVAL;
goto out_irq;
}
}
cpuhp_setup_state(CPUHP_AP_TEGRA_TIMER_STARTING,
"AP_TEGRA_TIMER_STARTING", tegra_timer_setup,
tegra_timer_stop);
return ret;
out_irq:
for_each_possible_cpu(cpu) {
struct timer_of *cpu_to;
cpu_to = per_cpu_ptr(&tegra_to, cpu);
if (cpu_to->clkevt.irq) {
free_irq(cpu_to->clkevt.irq, &cpu_to->clkevt);
irq_dispose_mapping(cpu_to->clkevt.irq);
}
}
out:
timer_of_cleanup(to);
return ret;
}
#else /* CONFIG_ARM */
static int __init tegra_init_timer(struct device_node *np)
{
int ret = 0;
ret = tegra_timer_common_init(np, &tegra_to);
if (ret < 0)
goto out;
tegra_to.of_base.base = timer_reg_base + TIMER_BASE_FOR_CPU(0);
tegra_to.of_clk.rate = 1000000; /* microsecond timer */
sched_clock_register(tegra_read_sched_clock, 32,
timer_of_rate(&tegra_to));
ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
"timer_us", timer_of_rate(&tegra_to),
300, 32, clocksource_mmio_readl_up);
if (ret) {
pr_err("Failed to register clocksource\n");
goto out;
}
tegra_delay_timer.read_current_timer =
tegra_delay_timer_read_counter_long;
tegra_delay_timer.freq = timer_of_rate(&tegra_to);
register_current_timer_delay(&tegra_delay_timer);
clockevents_config_and_register(&tegra_to.clkevt,
timer_of_rate(&tegra_to),
0x1,
0x1fffffff);
return ret;
out:
timer_of_cleanup(&tegra_to);
return ret;
}
static int __init tegra20_init_rtc(struct device_node *np)
{
int ret;
ret = timer_of_init(np, &suspend_rtc_to);
if (ret)
return ret;
clocksource_register_hz(&suspend_rtc_clocksource, 1000);
return 0;
}
TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
#endif
TIMER_OF_DECLARE(tegra210_timer, "nvidia,tegra210-timer", tegra_init_timer);
TIMER_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra_init_timer);


@ -0,0 +1,44 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI DaVinci clocksource driver
*
* Copyright (C) 2019 Texas Instruments
* Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
*/
#ifndef __TIMER_DAVINCI_H__
#define __TIMER_DAVINCI_H__
#include <linux/clk.h>
#include <linux/ioport.h>
enum {
DAVINCI_TIMER_CLOCKEVENT_IRQ,
DAVINCI_TIMER_CLOCKSOURCE_IRQ,
DAVINCI_TIMER_NUM_IRQS,
};
/**
* struct davinci_timer_cfg - davinci clocksource driver configuration struct
* @reg: register range resource
* @irq: clockevent and clocksource interrupt resources
* @cmp_off: if set - it specifies the compare register used for clockevent
*
* Note: if the compare register is specified, the driver will use the bottom
* clock half for both clocksource and clockevent and the compare register
* to generate event irqs. The user must supply the correct compare register
* interrupt number.
*
* This is only used by da830 the DSP of which uses the top half. The timer
* driver still configures the top half to run in free-run mode.
*/
struct davinci_timer_cfg {
struct resource reg;
struct resource irq[DAVINCI_TIMER_NUM_IRQS];
unsigned int cmp_off;
};
int __init davinci_timer_register(struct clk *clk,
const struct davinci_timer_cfg *data);
#endif /* __TIMER_DAVINCI_H__ */


@ -116,10 +116,10 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_ACPI_STARTING,
CPUHP_AP_PERF_ARM_STARTING,
CPUHP_AP_ARM_L2X0_STARTING,
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
CPUHP_AP_JCORE_TIMER_STARTING,
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
CPUHP_AP_QCOM_TIMER_STARTING,
CPUHP_AP_TEGRA_TIMER_STARTING,