/*
 * ARM architected timer (CP15) register accessors,
 * with Allwinner sun50i counter-read workarounds.
 */
#ifndef __ASMARM_ARCH_TIMER_H
|
|
#define __ASMARM_ARCH_TIMER_H
|
|
|
|
#include <asm/barrier.h>
|
|
#include <asm/errno.h>
|
|
#include <linux/clocksource.h>
|
|
#include <linux/init.h>
|
|
#include <linux/types.h>
|
|
|
|
#include <clocksource/arm_arch_timer.h>
|
|
|
|
#ifdef CONFIG_ARM_ARCH_TIMER
|
|
/* One-time architected-timer setup hook; implemented outside this header. */
int arch_timer_arch_init(void);
/*
|
|
* These register accessors are marked inline so the compiler can
|
|
* nicely work out which register we want, and chuck away the rest of
|
|
* the code. At least it does so with a recent GCC (4.6.3).
|
|
*/
|
|
static __always_inline
|
|
void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
|
|
{
|
|
if (access == ARCH_TIMER_PHYS_ACCESS) {
|
|
switch (reg) {
|
|
case ARCH_TIMER_REG_CTRL:
|
|
asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
|
|
break;
|
|
case ARCH_TIMER_REG_TVAL:
|
|
asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
|
|
break;
|
|
}
|
|
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
|
|
switch (reg) {
|
|
case ARCH_TIMER_REG_CTRL:
|
|
asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
|
|
break;
|
|
case ARCH_TIMER_REG_TVAL:
|
|
asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
|
|
break;
|
|
}
|
|
}
|
|
|
|
isb();
|
|
}
|
|
|
|
static __always_inline
|
|
u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
|
|
{
|
|
u32 val = 0;
|
|
|
|
if (access == ARCH_TIMER_PHYS_ACCESS) {
|
|
switch (reg) {
|
|
case ARCH_TIMER_REG_CTRL:
|
|
asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
|
|
break;
|
|
case ARCH_TIMER_REG_TVAL:
|
|
asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
|
|
break;
|
|
}
|
|
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
|
|
switch (reg) {
|
|
case ARCH_TIMER_REG_CTRL:
|
|
asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
|
|
break;
|
|
case ARCH_TIMER_REG_TVAL:
|
|
asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
|
|
break;
|
|
}
|
|
}
|
|
|
|
return val;
|
|
}
|
|
|
|
static __always_inline
|
|
u64 arch_timer_reg_read_cval(int access)
|
|
{
|
|
u64 cval;
|
|
|
|
if (access == ARCH_TIMER_PHYS_ACCESS)
|
|
asm volatile("mrrc p15, 2, %Q0, %R0, c14" : "=r" (cval));
|
|
else if (access == ARCH_TIMER_VIRT_ACCESS)
|
|
asm volatile("mrrc p15, 3, %Q0, %R0, c14" : "=r" (cval));
|
|
else
|
|
cval = 0;
|
|
|
|
return cval;
|
|
}
|
|
|
|
static inline u32 arch_timer_get_cntfrq(void)
|
|
{
|
|
u32 val;
|
|
asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
|
|
return val;
|
|
}
|
|
|
|
#ifdef CONFIG_ARCH_SUN50I
/* Maximum number of double-read attempts before giving up. */
#define ARCH_PCNT_TRY_MAX_TIME (12)
/* Two back-to-back reads are trusted when they differ by less than this. */
#define ARCH_PCNT_MAX_DELTA (8)
/*
 * Read CNTPCT, the 64-bit physical counter.
 *
 * The sun50i counter can apparently return transiently imprecise
 * values (the original comments said "vcnt", but this routine reads
 * the physical counter), so read it twice and only accept a pair of
 * values that is monotonic and close together.
 */
static inline u64 arch_counter_get_cntpct(void)
{
	u64 pct0;
	u64 pct1;
	u64 delta;
	u32 retry = 0;

	/* sun50i counter may be imprecise;
	 * re-read until two consecutive samples agree.
	 */
	while (retry < ARCH_PCNT_TRY_MAX_TIME) {
		isb();
		asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (pct0));
		isb();
		asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (pct1));
		delta = pct1 - pct0;
		if ((pct1 >= pct0) && (delta < ARCH_PCNT_MAX_DELTA)) {
			/* consistent pair: return the later sample */
			return pct1;
		}
		/* inconsistent pair, try again */
		retry++;
	}
	/* Do not worry about exhausting the retries; just return the last
	 * sample. arm64 enables CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE to
	 * absorb an occasional bad value.
	 */
	return pct1;
}
#else
/* Read CNTPCT, the 64-bit physical counter. */
static inline u64 arch_counter_get_cntpct(void)
{
	u64 cval;

	isb();
	asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
	return cval;
}
#endif /* CONFIG_ARCH_SUN50I */
#ifdef CONFIG_ARCH_SUN50I
|
|
#define ARCH_VCNT_TRY_MAX_TIME (12)
|
|
#define ARCH_VCNT_MAX_DELTA (8)
|
|
static inline u64 arch_counter_get_cntvct(void)
|
|
{
|
|
u64 vct0;
|
|
u64 vct1;
|
|
u64 delta;
|
|
u32 retry = 0;
|
|
|
|
/* sun50i vcnt maybe imprecise,
|
|
* we should try to fix this.
|
|
*/
|
|
while (retry < ARCH_VCNT_TRY_MAX_TIME) {
|
|
isb();
|
|
asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (vct0));
|
|
isb();
|
|
asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (vct1));
|
|
delta = vct1 - vct0;
|
|
if ((vct1 >= vct0) && (delta < ARCH_VCNT_MAX_DELTA)) {
|
|
/* read valid vcnt */
|
|
return vct1;
|
|
}
|
|
/* vcnt value error, try again */
|
|
retry++;
|
|
}
|
|
/* Do not warry for this, just return the last time vcnt.
|
|
* arm64 have enabled CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE.
|
|
*/
|
|
return vct1;
|
|
}
|
|
#else
|
|
static inline u64 arch_counter_get_cntvct(void)
|
|
{
|
|
u64 cval;
|
|
|
|
isb();
|
|
asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
|
|
return cval;
|
|
}
|
|
#endif /* CONFIG_ARCH_SUN50I */
|
|
|
|
static inline u32 arch_timer_get_cntkctl(void)
|
|
{
|
|
u32 cntkctl;
|
|
asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
|
|
return cntkctl;
|
|
}
|
|
|
|
/* Write CNTKCTL, the timer userspace-access control register. */
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
	asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
}
/*
 * Lock userspace out of the timers: clear every CNTKCTL bit that
 * grants user access to the physical/virtual counters and timers,
 * and disable the virtual event stream as well.
 */
static inline void __cpuinit arch_counter_set_user_access(void)
{
	u32 ctl = arch_timer_get_cntkctl();

	ctl &= ~(ARCH_TIMER_USR_PCT_ACCESS_EN
			| ARCH_TIMER_USR_VCT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_USR_PT_ACCESS_EN);

	arch_timer_set_cntkctl(ctl);
}
static inline void arch_timer_evtstrm_enable(int divider)
|
|
{
|
|
u32 cntkctl = arch_timer_get_cntkctl();
|
|
cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
|
|
/* Set the divider and enable virtual event stream */
|
|
cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
|
|
| ARCH_TIMER_VIRT_EVT_EN;
|
|
arch_timer_set_cntkctl(cntkctl);
|
|
elf_hwcap |= HWCAP_EVTSTRM;
|
|
}
|
|
|
|
#endif
|
|
|
|
#endif
|