diff --git a/arch/arm/mach-ux500/cache-l2x0.c b/arch/arm/mach-ux500/cache-l2x0.c
index 122ddde00ba7..da5569d83d58 100644
--- a/arch/arm/mach-ux500/cache-l2x0.c
+++ b/arch/arm/mach-ux500/cache-l2x0.c
@@ -12,44 +12,6 @@
 
 static void __iomem *l2x0_base;
 
-static inline void ux500_cache_wait(void __iomem *reg, unsigned long mask)
-{
-	/* wait for the operation to complete */
-	while (readl_relaxed(reg) & mask)
-		cpu_relax();
-}
-
-static inline void ux500_cache_sync(void)
-{
-	writel_relaxed(0, l2x0_base + L2X0_CACHE_SYNC);
-	ux500_cache_wait(l2x0_base + L2X0_CACHE_SYNC, 1);
-}
-
-/*
- * The L2 cache cannot be turned off in the non-secure world.
- * Dummy until a secure service is in place.
- */
-static void ux500_l2x0_disable(void)
-{
-}
-
-/*
- * This is only called when doing a kexec, just after turning off the L2
- * and L1 cache, and it is surrounded by a spinlock in the generic version.
- * However, we're not really turning off the L2 cache right now and the
- * PL310 does not support exclusive accesses (used to implement the spinlock).
- * So, the invalidation needs to be done without the spinlock.
- */
-static void ux500_l2x0_inv_all(void)
-{
-	uint32_t l2x0_way_mask = (1<<16) - 1;	/* Bitmask of active ways */
-
-	/* invalidate all ways */
-	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
-	ux500_cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
-	ux500_cache_sync();
-}
-
 static int __init ux500_l2x0_unlock(void)
 {
 	int i;
@@ -85,9 +47,13 @@ static int __init ux500_l2x0_init(void)
 	/* 64KB way size, 8 way associativity, force WA */
 	l2x0_init(l2x0_base, 0x3e060000, 0xc0000fff);
 
-	/* Override invalidate function */
-	outer_cache.disable = ux500_l2x0_disable;
-	outer_cache.inv_all = ux500_l2x0_inv_all;
+	/*
+	 * We cannot disable the L2 cache while running in non-secure
+	 * mode; currently this only seems to be called on the kexec
+	 * path. So override outer_cache.disable with this crude
+	 * assignment until an SMI service is available.
+	 */
+	outer_cache.disable = NULL;
 
 	return 0;
 }
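
Note on why assigning NULL is sufficient here: the generic outer-cache wrappers in arch/arm/include/asm/outercache.h only invoke a hook when it is non-NULL, so clearing outer_cache.disable turns the disable step on the kexec path into a no-op instead of calling code that cannot work from the non-secure world. A minimal sketch of that guard, assuming the outercache.h helpers of this kernel era (not part of this patch; the exact definition may differ):

	/* Rough paraphrase of the generic wrapper, for illustration only */
	static inline void outer_disable(void)
	{
		if (outer_cache.disable)	/* NULL hook: silently skip */
			outer_cache.disable();
	}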