From c73cb2cce2c5c04163c0b86f8935dd9bf2de16ff Mon Sep 17 00:00:00 2001
From: Rohit Vaswani
Date: Mon, 4 Jan 2016 14:08:32 -0800
Subject: [PATCH] ANDROID: GKI: Revert "arm64: kill flush_cache_all()"

This reverts upstream commit 68234df4ea79 ("arm64: kill
flush_cache_all()"). It is required internally for certain use cases,
such as flushing the cache before reboot, to ensure all the data is
available in the ramdump.

Signed-off-by: Rohit Vaswani
Signed-off-by: Venkata Narendra Kumar Gutta
(cherry picked from commit 208984f39cda4c963d052c48862e3e9e0b4dba55)
[surenb: resolve merge conflicts, do not add unused cpu_soft_restart
 function because it conflicts with the function of the same name from
 arch/arm64/kernel/cpu-reset.h]
Bug: 153349826
Test: build
Signed-off-by: Suren Baghdasaryan
Change-Id: I47b4f4ba4001c1fbb53794c6da0b3fdf641bd981
---
 arch/arm64/include/asm/cacheflush.h |  5 ++
 arch/arm64/include/asm/proc-fns.h   |  2 +
 arch/arm64/mm/cache.S               | 73 +++++++++++++++++++++++++++++
 arch/arm64/mm/flush.c               |  1 +
 arch/arm64/mm/proc.S                | 31 ++++++++++++
 5 files changed, 112 insertions(+)

diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 19844211a4e6..93adc5f9e7e7 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -41,6 +41,10 @@
  *	the implementation assumes non-aliasing VIPT D-cache and (aliasing)
  *	VIPT I-cache.
  *
+ *	flush_cache_all()
+ *
+ *		Unconditionally clean and invalidate the entire cache.
+ *
  *	flush_cache_mm(mm)
  *
  *		Clean and invalidate all user space cache entries
@@ -72,6 +76,7 @@
  *	- kaddr  - page address
  *	- size   - region size
  */
+extern void flush_cache_all(void);
 extern void __flush_icache_range(unsigned long start, unsigned long end);
 extern int invalidate_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 16cef2e8449e..2408fb6f89cd 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -28,8 +28,10 @@
 struct mm_struct;
 struct cpu_suspend_ctx;
 
+extern void cpu_cache_off(void);
 extern void cpu_do_idle(void);
 extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
 extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
 extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
 
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index a194fd0e837f..8719253168f2 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -25,6 +25,79 @@
 #include
 #include
 
+/*
+ * __flush_dcache_all()
+ *
+ *	Flush the whole D-cache.
+ *
+ *	Corrupted registers: x0-x7, x9-x11
+ */
+__flush_dcache_all:
+	dmb	sy				// ensure ordering with previous memory accesses
+	mrs	x0, clidr_el1			// read clidr
+	and	x3, x0, #0x7000000		// extract loc from clidr
+	lsr	x3, x3, #23			// left align loc bit field
+	cbz	x3, finished			// if loc is 0, then no need to clean
+	mov	x10, #0				// start clean at cache level 0
+loop1:
+	add	x2, x10, x10, lsr #1		// work out 3x current cache level
+	lsr	x1, x0, x2			// extract cache type bits from clidr
+	and	x1, x1, #7			// mask of the bits for current cache only
+	cmp	x1, #2				// see what cache we have at this level
+	b.lt	skip				// skip if no cache, or just i-cache
+	save_and_disable_irqs x9		// make CSSELR and CCSIDR access atomic
+	msr	csselr_el1, x10			// select current cache level in csselr
+	isb					// isb to sync the new csselr and ccsidr
+	mrs	x1, ccsidr_el1			// read the new ccsidr
+	restore_irqs x9
+	and	x2, x1, #7			// extract the length of the cache lines
+	add	x2, x2, #4			// add 4 (line length offset)
+	mov	x4, #0x3ff
+	and	x4, x4, x1, lsr #3		// find maximum number on the way size
+	clz	w5, w4				// find bit position of way size increment
+	mov	x7, #0x7fff
+	and	x7, x7, x1, lsr #13		// extract max number of the index size
+loop2:
+	mov	x9, x4				// create working copy of max way size
+loop3:
+	lsl	x6, x9, x5
+	orr	x11, x10, x6			// factor way and cache number into x11
+	lsl	x6, x7, x2
+	orr	x11, x11, x6			// factor index number into x11
+	dc	cisw, x11			// clean & invalidate by set/way
+	subs	x9, x9, #1			// decrement the way
+	b.ge	loop3
+	subs	x7, x7, #1			// decrement the index
+	b.ge	loop2
+skip:
+	add	x10, x10, #2			// increment cache number
+	cmp	x3, x10
+	b.gt	loop1
+finished:
+	mov	x10, #0				// switch back to cache level 0
+	msr	csselr_el1, x10			// select current cache level in csselr
+	dsb	sy
+	isb
+	ret
+ENDPROC(__flush_dcache_all)
+
+/*
+ * flush_cache_all()
+ *
+ *	Flush the entire cache system.  The data cache flush is now achieved
+ *	using atomic clean / invalidates working outwards from L1 cache. This
+ *	is done using Set/Way based cache maintenance instructions.  The
+ *	instruction cache can still be invalidated back to the point of
+ *	unification in a single instruction.
+ */
+ENTRY(flush_cache_all)
+	mov	x12, lr
+	bl	__flush_dcache_all
+	mov	x0, #0
+	ic	ialluis				// I+BTB cache invalidate
+	ret	x12
+ENDPROC(flush_cache_all)
+
 /*
  *	flush_icache_range(start,end)
  *
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 5c9073bace83..fb10aa0e7265 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -87,6 +87,7 @@ EXPORT_SYMBOL(flush_dcache_page);
 /*
  * Additional functions defined in assembly.
  */
+EXPORT_SYMBOL(flush_cache_all);
 EXPORT_SYMBOL(__flush_icache_range);
 
 #ifdef CONFIG_ARCH_HAS_PMEM_API
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 9c9d7393d5b7..201624a5018b 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -55,6 +55,37 @@
 
 #define MAIR(attr, mt)	((attr) << ((mt) * 8))
 
+/*
+ * cpu_cache_off()
+ *
+ *	Turn the CPU D-cache off.
+ */
+ENTRY(cpu_cache_off)
+	mrs	x0, sctlr_el1
+	bic	x0, x0, #1 << 2			// clear SCTLR.C
+	msr	sctlr_el1, x0
+	isb
+	ret
+ENDPROC(cpu_cache_off)
+
+/*
+ * cpu_reset(loc)
+ *
+ *	Perform a soft reset of the system.  Put the CPU into the same state
+ *	as it would be if it had been reset, and branch to what would be the
+ *	reset vector. It must be executed with the flat identity mapping.
+ *
+ *	- loc   - location to jump to for soft reset
+ */
+	.align	5
+ENTRY(cpu_reset)
+	mrs	x1, sctlr_el1
+	bic	x1, x1, #1
+	msr	sctlr_el1, x1			// disable the MMU
+	isb
+	ret	x0
+ENDPROC(cpu_reset)
+
 /*
  *	cpu_do_idle()
  *
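
As a concrete illustration of the use case named in the commit message, a
caller would typically clean and invalidate the caches from a reboot hook so
that a post-reset ramdump sees coherent DRAM. The sketch below is only an
assumed example: the module and notifier names (ramdump_flush,
ramdump_reboot_notify) are hypothetical, and only flush_cache_all() plus its
EXPORT_SYMBOL come from this patch; register_reboot_notifier() is the
standard kernel reboot-notifier API.

/*
 * Illustrative sketch, not part of this patch: clean and invalidate all
 * caches late in the reboot path so the ramdump captures coherent memory.
 */
#include <linux/module.h>
#include <linux/reboot.h>
#include <asm/cacheflush.h>

static int ramdump_reboot_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	/* Push dirty cache lines to DRAM before the SoC resets. */
	flush_cache_all();
	return NOTIFY_DONE;
}

static struct notifier_block ramdump_reboot_nb = {
	.notifier_call	= ramdump_reboot_notify,
	.priority	= 0,
};

static int __init ramdump_flush_init(void)
{
	return register_reboot_notifier(&ramdump_reboot_nb);
}

static void __exit ramdump_flush_exit(void)
{
	unregister_reboot_notifier(&ramdump_reboot_nb);
}

module_init(ramdump_flush_init);
module_exit(ramdump_flush_exit);
MODULE_LICENSE("GPL");

Note that Set/Way maintenance only reliably covers the caches visible to the
CPU executing it, so in practice a hook like this is run late in the
reboot/restart path, after secondary CPUs have been parked.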