diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 51be2295fc3..94aa75e4270 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -7,6 +7,8 @@
 
 #include
 
+extern int msm_krait_need_wfe_fixup;
+
 /*
  * sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
  * extensions, so when running on UP, we have to patch these instructions away.
@@ -34,6 +36,31 @@
 #define WFE()	ALT_SMP("wfe", "nop")
 #endif
 
+/*
+ * The fixup involves disabling interrupts during execution of the WFE
+ * instruction. This could potentially lead to deadlock if a thread is trying
+ * to acquire a spinlock which is being released from an interrupt context.
+ */
+#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
+#define WFE_SAFE(fixup, tmp)				\
+"	mrs	" tmp ", cpsr\n"			\
+"	cmp	" fixup ", #0\n"			\
+"	wfeeq\n"					\
+"	beq	10f\n"					\
+"	cpsid	if\n"					\
+"	mrc	p15, 7, " fixup ", c15, c0, 5\n"	\
+"	bic	" fixup ", " fixup ", #0x10000\n"	\
+"	mcr	p15, 7, " fixup ", c15, c0, 5\n"	\
+"	isb\n"						\
+"	wfe\n"						\
+"	orr	" fixup ", " fixup ", #0x10000\n"	\
+"	mcr	p15, 7, " fixup ", c15, c0, 5\n"	\
+"	isb\n"						\
+"10:	msr	cpsr_cf, " tmp "\n"
+#else
+#define WFE_SAFE(fixup, tmp)	"	wfe\n"
+#endif
+
 static inline void dsb_sev(void)
 {
 #if __LINUX_ARM_ARCH__ >= 7
@@ -71,18 +98,18 @@ static inline void dsb_sev(void)
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
-	unsigned long tmp;
+	unsigned long tmp, fixup = msm_krait_need_wfe_fixup;
 
 	__asm__ __volatile__(
 "1:	ldrex	%[tmp], [%[lock]]\n"
 "	teq	%[tmp], #0\n"
 "	beq	2f\n"
-	WFE()
+	WFE_SAFE("%[fixup]", "%[tmp]")
 "2:\n"
 "	strexeq	%[tmp], %[bit0], [%[lock]]\n"
 "	teqeq	%[tmp], #0\n"
 "	bne	1b"
-	: [tmp] "=&r" (tmp)
+	: [tmp] "=&r" (tmp), [fixup] "+r" (fixup)
 	: [lock] "r" (&lock->lock), [bit0] "r" (1)
 	: "cc");
 
@@ -149,6 +176,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp, ticket, next_ticket;
+	unsigned long fixup = msm_krait_need_wfe_fixup;
 
 	/* Grab the next ticket and wait for it to be "served" */
 	__asm__ __volatile__(
@@ -161,13 +189,14 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 "2:\n"
 #ifdef CONFIG_CPU_32v6K
 "	beq	3f\n"
-	WFE()
+	WFE_SAFE("%[fixup]", "%[tmp]")
 "3:\n"
 #endif
 "	ldr	%[tmp], [%[lockaddr]]\n"
 "	cmp	%[ticket], %[tmp], lsr #16\n"
 "	bne	2b"
-	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp), [next_ticket]"=&r" (next_ticket)
+	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
+	  [next_ticket]"=&r" (next_ticket), [fixup]"+r" (fixup)
 	: [lockaddr]"r" (&lock->lock), [val1]"r" (1)
 	: "cc");
 	smp_mb();
@@ -216,7 +245,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	unsigned long ticket;
+	unsigned long ticket, tmp, fixup = msm_krait_need_wfe_fixup;
 
 	/* Wait for now_serving == next_ticket */
 	__asm__ __volatile__(
@@ -224,7 +253,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 "	cmpne	%[lockaddr], %[lockaddr]\n"
 "1:\n"
 "	beq	2f\n"
-	WFE()
+	WFE_SAFE("%[fixup]", "%[tmp]")
 "2:\n"
 #else
 "1:\n"
@@ -234,7 +263,8 @@
 "	uxth	%[ticket], %[ticket]\n"
 "	cmp	%[ticket], #0\n"
 "	bne	1b"
-	: [ticket]"=&r" (ticket)
+	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
+	  [fixup]"+r" (fixup)
 	: [lockaddr]"r" (&lock->lock)
 	: "cc");
 }
@@ -262,18 +292,18 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	unsigned long tmp;
+	unsigned long tmp, fixup = msm_krait_need_wfe_fixup;
 
 	__asm__ __volatile__(
 "1:	ldrex	%[tmp], [%[lock]]\n"
 "	teq	%[tmp], #0\n"
 "	beq	2f\n"
-	WFE()
+	WFE_SAFE("%[fixup]", "%[tmp]")
 "2:\n"
 "	strexeq	%[tmp], %[bit31], [%[lock]]\n"
 "	teq	%[tmp], #0\n"
 "	bne	1b"
-	: [tmp] "=&r" (tmp)
+	: [tmp] "=&r" (tmp), [fixup] "+r" (fixup)
 	: [lock] "r" (&rw->lock), [bit31] "r" (0x80000000)
 	: "cc");
 
@@ -330,18 +360,18 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
  */
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-	unsigned long tmp, tmp2;
+	unsigned long tmp, tmp2, fixup = msm_krait_need_wfe_fixup;
 
 	__asm__ __volatile__(
 "1:	ldrex	%[tmp], [%[lock]]\n"
 "	adds	%[tmp], %[tmp], #1\n"
 "	strexpl	%[tmp2], %[tmp], [%[lock]]\n"
 "	bpl	2f\n"
-	WFE()
+	WFE_SAFE("%[fixup]", "%[tmp]")
 "2:\n"
 "	rsbpls	%[tmp], %[tmp2], #0\n"
 "	bmi	1b"
-	: [tmp] "=&r" (tmp), [tmp2] "=&r" (tmp2)
+	: [tmp] "=&r" (tmp), [tmp2] "=&r" (tmp2), [fixup] "+r" (fixup)
 	: [lock] "r" (&rw->lock)
 	: "cc");
 
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index c23bc1093b6..5e7965efc54 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -172,6 +172,7 @@ config ARCH_MSM8960
 	select HOLES_IN_ZONE if SPARSEMEM
 	select MSM_RUN_QUEUE_STATS
 	select ARM_HAS_SG_CHAIN
+	select MSM_KRAIT_WFE_FIXUP
 
 config ARCH_MSM8930
 	bool "MSM8930"
@@ -203,6 +204,7 @@ config ARCH_MSM8930
 	select MSM_PM8X60 if PM
 	select HOLES_IN_ZONE if SPARSEMEM
 	select ARM_HAS_SG_CHAIN
+	select MSM_KRAIT_WFE_FIXUP
 
 config ARCH_APQ8064
 	bool "APQ8064"
@@ -229,6 +231,7 @@ config ARCH_APQ8064
 	select MIGHT_HAVE_PCI
 	select ARCH_SUPPORTS_MSI
 	select ARM_HAS_SG_CHAIN
+	select MSM_KRAIT_WFE_FIXUP
 
 config ARCH_MSM8974
 	bool "MSM8974"
@@ -370,6 +373,9 @@ config ARCH_MSM_CORTEXMP
 	select MSM_SMP
 	bool
 
+config MSM_KRAIT_WFE_FIXUP
+	bool
+
 config ARCH_MSM_CORTEX_A5
 	bool
 	select HAVE_HW_BRKPT_RESERVED_RW_ACCESS
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 32f61f50afd..b6fb52aeead 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -32,6 +32,7 @@
 #include
 #include
 #include
+#include <asm/cputype.h>
 #include
 #include
@@ -40,6 +41,8 @@
 static unsigned long phys_initrd_start __initdata = 0;
 static unsigned long phys_initrd_size __initdata = 0;
+int msm_krait_need_wfe_fixup;
+EXPORT_SYMBOL(msm_krait_need_wfe_fixup);
 
 static int __init early_initrd(char *p)
 {
@@ -916,3 +919,17 @@ static int __init keepinitrd_setup(char *__unused)
 
 __setup("keepinitrd", keepinitrd_setup);
 #endif
+
+#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
+static int __init msm_krait_wfe_init(void)
+{
+	unsigned int val, midr;
+	midr = read_cpuid_id() & 0xffffff00;
+	if ((midr == 0x511f0400) || (midr == 0x510f0600)) {
+		asm volatile("mrc p15, 7, %0, c15, c0, 5" : "=r" (val));
+		msm_krait_need_wfe_fixup = (val & 0x10000) ? 1 : 0;
+	}
+	return 0;
+}
+pure_initcall(msm_krait_wfe_init);
+#endif
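
---

Note on WFE_SAFE() (not part of the patch): when the fixup is enabled, the
asm saves CPSR, masks IRQs and FIQs, clears bit 16 of the Krait CP15
register at p15, 7, c15, c0, 5, executes WFE with that bit clear, sets the
bit again, and restores CPSR. Below is a minimal C sketch of the same
control flow; krait_read_pcr()/krait_write_pcr() are hypothetical stand-ins
for the mrc/mcr accesses, and local_irq_save() only approximates "cpsid if",
which masks FIQs as well as IRQs:

	/* Sketch only -- mirrors the WFE_SAFE() asm above. */
	static inline unsigned long krait_read_pcr(void)	/* hypothetical */
	{
		unsigned long val;

		asm volatile("mrc p15, 7, %0, c15, c0, 5" : "=r" (val));
		return val;
	}

	static inline void krait_write_pcr(unsigned long val)	/* hypothetical */
	{
		asm volatile("mcr p15, 7, %0, c15, c0, 5" : : "r" (val));
	}

	static inline void wfe_safe_sketch(void)
	{
		unsigned long pcr, flags;

		if (!msm_krait_need_wfe_fixup) {
			wfe();			/* unaffected CPUs just wait */
			return;
		}

		local_irq_save(flags);		/* asm: mrs tmp, cpsr; cpsid if */
		pcr = krait_read_pcr();
		krait_write_pcr(pcr & ~0x10000);/* clear bit 16 around the wait */
		isb();
		wfe();
		krait_write_pcr(pcr | 0x10000);	/* the asm's orr; equal to the
						 * original value, since the fixup
						 * is only armed when bit 16 was
						 * set at boot */
		isb();
		local_irq_restore(flags);	/* asm: msr cpsr_cf, tmp */
	}

The interrupts-off window around the WFE is what the comment at the top of
WFE_SAFE() warns about: if the lock can only be released from an interrupt
handler on the waiting CPU, that interrupt cannot be taken while the fixup
sequence has IRQs masked.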
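Note on the detection side (also not part of the patch): read_cpuid_id()
returns the MIDR, and the 0xffffff00 mask drops the low-order revision bits,
so every revision of the two affected parts matches; 0x51 in the implementer
byte is Qualcomm. A standalone userspace demo of the same masking, using a
made-up revision value:

	/* Demo of the MIDR mask in msm_krait_wfe_init(); 0x511f04d0 is a
	 * made-up revision of the 0x511f0400 family. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int midr = 0x511f04d0;		/* hypothetical MIDR */
		unsigned int masked = midr & 0xffffff00;

		printf("0x%08x -> 0x%08x: %s\n", midr, masked,
		       (masked == 0x511f0400 || masked == 0x510f0600) ?
		       "candidate for the fixup" : "no fixup");
		return 0;
	}

Even on a matching part, msm_krait_need_wfe_fixup is only set when bit 16 of
the probed register reads back set, so parts with the bit clear keep the
plain WFE path.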