diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index fa6b7c78f18a..6e98207c95e8 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -56,19 +56,12 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
                 *(pteptr) = (pteval);                           \
         } while(0)
 
-#define pte_inserted(x)						\
-	((pte_val(x) & (_PAGE_PRESENT|_PAGE_ACCESSED))		\
-	 == (_PAGE_PRESENT|_PAGE_ACCESSED))
-
 #define set_pte_at(mm, addr, ptep, pteval)			\
 	do {							\
-		pte_t old_pte;					\
 		unsigned long flags;				\
 		spin_lock_irqsave(&pa_tlb_lock, flags);		\
-		old_pte = *ptep;				\
-		if (pte_inserted(old_pte))			\
-			purge_tlb_entries(mm, addr);		\
 		set_pte(ptep, pteval);				\
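+		/* Purge after the PTE update: TLB inserts take	\
+		   pa_tlb_lock and recheck the PTE (see the	\
+		   tlb_lock macro in entry.S), so a stale	\
+		   translation cannot be reinserted. */		\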
+		purge_tlb_entries(mm, addr);			\
 		spin_unlock_irqrestore(&pa_tlb_lock, flags);	\
 	} while (0)
 
@@ -202,7 +195,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define _PAGE_HUGE     (1 << xlate_pabit(_PAGE_HPAGE_BIT))
 #define _PAGE_USER     (1 << xlate_pabit(_PAGE_USER_BIT))
 
-#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |  _PAGE_DIRTY | _PAGE_ACCESSED)
+#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
 #define _PAGE_KERNEL_RO	(_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define _PAGE_KERNEL_EXEC	(_PAGE_KERNEL_RO | _PAGE_EXEC)
@@ -227,22 +220,22 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 
 #ifndef __ASSEMBLY__
 
-#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
+#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER)
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE)
 /* Others seem to make this executable, I don't know if that's correct
    or not.  The stack is mapped this way though so this is necessary
    in the short term - dhd@linuxcare.com, 2000-08-08 */
-#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
-#define PAGE_WRITEONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE | _PAGE_ACCESSED)
-#define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ)
+#define PAGE_WRITEONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE)
+#define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC)
 #define PAGE_COPY       PAGE_EXECREAD
-#define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
+#define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
 #define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)
 #define PAGE_KERNEL_RWX	__pgprot(_PAGE_KERNEL_RWX)
 #define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL_RO)
 #define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
-#define PAGE_GATEWAY    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
+#define PAGE_GATEWAY    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY | _PAGE_READ)
 
 
 /*
@@ -479,8 +472,8 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
 		spin_unlock_irqrestore(&pa_tlb_lock, flags);
 		return 0;
 	}
-	purge_tlb_entries(vma->vm_mm, addr);
 	set_pte(ptep, pte_mkold(pte));
+	purge_tlb_entries(vma->vm_mm, addr);
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 	return 1;
 }
@@ -493,9 +486,8 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 
 	spin_lock_irqsave(&pa_tlb_lock, flags);
 	old_pte = *ptep;
-	if (pte_inserted(old_pte))
-		purge_tlb_entries(mm, addr);
 	set_pte(ptep, __pte(0));
+	purge_tlb_entries(mm, addr);
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 
 	return old_pte;
@@ -505,8 +497,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 {
 	unsigned long flags;
 	spin_lock_irqsave(&pa_tlb_lock, flags);
-	purge_tlb_entries(mm, addr);
 	set_pte(ptep, pte_wrprotect(*ptep));
+	purge_tlb_entries(mm, addr);
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 }
 
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 8a63515f03bf..16aec9ba2580 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -37,8 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
 	volatile unsigned int *a;
 
 	a = __ldcw_align(x);
-	mb();
-	*a = 1;
+	/* Release with ordered store. */
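+	/* A stw,ma with zero displacement acts as an ordered store
+	   (equivalent to stw,o on PA 2.0), so no sync barrier is
+	   needed before releasing the lock. */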
+	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index bddd2acebdcc..f9e83939b473 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -36,6 +36,7 @@ EXPORT_SYMBOL(dcache_stride);
 
 void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 EXPORT_SYMBOL(flush_dcache_page_asm);
+void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 
 
@@ -303,6 +304,17 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
 	preempt_enable();
 }
 
+static inline void
+__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+		   unsigned long physaddr)
+{
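+	/* Unlike fdc, pdc need not write dirty lines back; callers
+	   use this only when the mm has no context and the data can
+	   be discarded. */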
+	preempt_disable();
+	purge_dcache_page_asm(physaddr, vmaddr);
+	if (vma->vm_flags & VM_EXEC)
+		flush_icache_page_asm(physaddr, vmaddr);
+	preempt_enable();
+}
+
 void flush_dcache_page(struct page *page)
 {
 	struct address_space *mapping = page_mapping_file(page);
@@ -364,7 +376,7 @@ EXPORT_SYMBOL(flush_kernel_icache_range_asm);
 #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
 static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
 
-#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
+#define FLUSH_TLB_THRESHOLD (512*1024) /* 0.5MB minimum TLB threshold */
 static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;
 
 void __init parisc_setup_cache_timing(void)
@@ -404,10 +416,6 @@ void __init parisc_setup_cache_timing(void)
 		goto set_tlb_threshold;
 	}
 
-	alltime = mfctl(16);
-	flush_tlb_all();
-	alltime = mfctl(16) - alltime;
-
 	size = 0;
 	start = (unsigned long) _text;
 	rangetime = mfctl(16);
@@ -418,13 +426,19 @@ void __init parisc_setup_cache_timing(void)
 	}
 	rangetime = mfctl(16) - rangetime;
 
-	printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
+	alltime = mfctl(16);
+	flush_tlb_all();
+	alltime = mfctl(16) - alltime;
+
+	printk(KERN_INFO "Whole TLB flush %lu cycles, range flush of %lu bytes %lu cycles\n",
 		alltime, size, rangetime);
 
-	threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
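+	/* Estimate the range size at which flushing page by page
+	   costs as much as one whole-TLB flush. */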
+	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
+	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
+		threshold/1024);
 
 set_tlb_threshold:
-	if (threshold)
+	if (threshold > parisc_tlb_flush_threshold)
 		parisc_tlb_flush_threshold = threshold;
 	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
 		parisc_tlb_flush_threshold/1024);
@@ -573,9 +587,12 @@ void flush_cache_mm(struct mm_struct *mm)
 			pfn = pte_pfn(*ptep);
 			if (!pfn_valid(pfn))
 				continue;
-			if (unlikely(mm->context))
+			if (unlikely(mm->context)) {
 				flush_tlb_page(vma, addr);
-			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+			} else {
+				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
+			}
 		}
 	}
 }
@@ -610,9 +627,12 @@ void flush_cache_range(struct vm_area_struct *vma,
 			continue;
 		pfn = pte_pfn(*ptep);
 		if (pfn_valid(pfn)) {
-			if (unlikely(vma->vm_mm->context))
+			if (unlikely(vma->vm_mm->context)) {
 				flush_tlb_page(vma, addr);
-			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+			} else {
+				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
+			}
 		}
 	}
 }
@@ -621,9 +641,12 @@ void
 flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
 {
 	if (pfn_valid(pfn)) {
-		if (likely(vma->vm_mm->context))
+		if (likely(vma->vm_mm->context)) {
 			flush_tlb_page(vma, vmaddr);
-		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+		} else {
+			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+		}
 	}
 }
 
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 1b4732e20137..a63bf2413d17 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -430,8 +430,6 @@
 	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
 	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
 	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
-	LDREG		%r0(\pmd),\pte
-	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
 	.endm
 
 	/* Look up PTE in a 3-Level scheme.
@@ -462,7 +460,7 @@
 	L2_ptep		\pgd,\pte,\index,\va,\fault
 	.endm
 
-	/* Acquire pa_tlb_lock lock and recheck page is still present. */
+	/* Acquire pa_tlb_lock and check that the page is present. */
 	.macro		tlb_lock	spc,ptp,pte,tmp,tmp1,fault
 #ifdef CONFIG_SMP
 	cmpib,COND(=),n	0,\spc,2f
@@ -471,20 +469,20 @@
 	cmpib,COND(=)	0,\tmp1,1b
 	nop
 	LDREG		0(\ptp),\pte
-	bb,<,n		\pte,_PAGE_PRESENT_BIT,2f
+	bb,<,n		\pte,_PAGE_PRESENT_BIT,3f
 	b		\fault
-	stw		 \spc,0(\tmp)
-2:
+	stw,ma		\spc,0(\tmp)
 #endif
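+	/* Lockless path: UP, or kernel space (spc == 0) on SMP. */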
+2:	LDREG		0(\ptp),\pte
+	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
+3:
 	.endm
 
 	/* Release pa_tlb_lock lock without reloading lock address. */
 	.macro		tlb_unlock0	spc,tmp
 #ifdef CONFIG_SMP
 	or,COND(=)	%r0,\spc,%r0
-	sync
-	or,COND(=)	%r0,\spc,%r0
-	stw             \spc,0(\tmp)
+	stw,ma		\spc,0(\tmp)
 #endif
 	.endm
 
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 97451e67d35b..ec0d5830f386 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -881,7 +881,6 @@ ENTRY_CFI(flush_dcache_page_asm)
 	add		%r28, %r25, %r25
 	sub		%r25, r31, %r25
 
-
 1:      fdc,m		r31(%r28)
 	fdc,m		r31(%r28)
 	fdc,m		r31(%r28)
@@ -897,7 +896,7 @@ ENTRY_CFI(flush_dcache_page_asm)
 	fdc,m		r31(%r28)
 	fdc,m		r31(%r28)
 	fdc,m		r31(%r28)
-	cmpb,COND(<<)	%r28, %r25,1b
+	cmpb,COND(>>)	%r25, %r28, 1b /* predict taken */
 	fdc,m		r31(%r28)
 
 	sync
@@ -908,6 +907,72 @@ ENTRY_CFI(flush_dcache_page_asm)
 	.procend
 ENDPROC_CFI(flush_dcache_page_asm)
 
+ENTRY_CFI(purge_dcache_page_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	ldil		L%(TMPALIAS_MAP_START), %r28
+#ifdef CONFIG_64BIT
+#if (TMPALIAS_MAP_START >= 0x80000000)
+	depdi		0, 31,32, %r28		/* clear any sign extension */
+#endif
+	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
+	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
+	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
+#else
+	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
+	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
+	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
+#endif
+
+	/* Purge any old translation */
+
+#ifdef CONFIG_PA20
+	pdtlb,l		%r0(%r28)
+#else
+	tlb_lock	%r20,%r21,%r22
+	pdtlb		%r0(%r28)
+	tlb_unlock	%r20,%r21,%r22
+#endif
+
+	ldil		L%dcache_stride, %r1
+	ldw		R%dcache_stride(%r1), r31
+
+#ifdef CONFIG_64BIT
+	depdi,z		1, 63-PAGE_SHIFT,1, %r25
+#else
+	depwi,z		1, 31-PAGE_SHIFT,1, %r25
+#endif
+	add		%r28, %r25, %r25
+	sub		%r25, r31, %r25
+
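+	/* Purge 16 cache lines per iteration; r25 points at the last
+	   line of the page. */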
+1:      pdc,m		r31(%r28)
+	pdc,m		r31(%r28)
+	pdc,m		r31(%r28)
+	pdc,m		r31(%r28)
+	pdc,m		r31(%r28)
+	pdc,m		r31(%r28)
+	pdc,m		r31(%r28)
+	pdc,m		r31(%r28)
+	pdc,m		r31(%r28)
+	pdc,m		r31(%r28)
+	pdc,m		r31(%r28)
+	pdc,m		r31(%r28)
+	pdc,m		r31(%r28)
+	pdc,m		r31(%r28)
+	pdc,m		r31(%r28)
+	cmpb,COND(>>)	%r25, %r28, 1b /* predict taken */
+	pdc,m		r31(%r28)
+
+	sync
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(purge_dcache_page_asm)
+
 ENTRY_CFI(flush_icache_page_asm)
 	.proc
 	.callinfo NO_CALLS
@@ -953,7 +1018,6 @@ ENTRY_CFI(flush_icache_page_asm)
 	add		%r28, %r25, %r25
 	sub		%r25, %r31, %r25
 
-
 	/* fic only has the type 26 form on PA1.1, requiring an
 	 * explicit space specification, so use %sr4 */
 1:      fic,m		%r31(%sr4,%r28)
@@ -971,7 +1035,7 @@ ENTRY_CFI(flush_icache_page_asm)
 	fic,m		%r31(%sr4,%r28)
 	fic,m		%r31(%sr4,%r28)
 	fic,m		%r31(%sr4,%r28)
-	cmpb,COND(<<)	%r28, %r25,1b
+	cmpb,COND(>>)	%r25, %r28, 1b /* predict taken */
 	fic,m		%r31(%sr4,%r28)
 
 	sync
@@ -998,7 +1062,6 @@ ENTRY_CFI(flush_kernel_dcache_page_asm)
 	add		%r26, %r25, %r25
 	sub		%r25, %r23, %r25
 
-
 1:      fdc,m		%r23(%r26)
 	fdc,m		%r23(%r26)
 	fdc,m		%r23(%r26)
@@ -1014,7 +1077,7 @@ ENTRY_CFI(flush_kernel_dcache_page_asm)
 	fdc,m		%r23(%r26)
 	fdc,m		%r23(%r26)
 	fdc,m		%r23(%r26)
-	cmpb,COND(<<)		%r26, %r25,1b
+	cmpb,COND(>>)	%r25, %r26, 1b /* predict taken */
 	fdc,m		%r23(%r26)
 
 	sync
@@ -1056,7 +1119,7 @@ ENTRY_CFI(purge_kernel_dcache_page_asm)
 	pdc,m		%r23(%r26)
 	pdc,m		%r23(%r26)
 	pdc,m		%r23(%r26)
-	cmpb,COND(<<)		%r26, %r25, 1b
+	cmpb,COND(>>)	%r25, %r26, 1b /* predict taken */
 	pdc,m		%r23(%r26)
 
 	sync
@@ -1077,7 +1140,33 @@ ENTRY_CFI(flush_user_dcache_range_asm)
 	ldo		-1(%r23), %r21
 	ANDCM		%r26, %r21, %r26
 
-1:      cmpb,COND(<<),n	%r26, %r25, 1b
+#ifdef CONFIG_64BIT
+	depd,z		%r23, 59, 60, %r21
+#else
+	depw,z		%r23, 27, 28, %r21
+#endif
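+	/* r21 = 16 * stride; r22 marks the end of the next block of
+	   16 cache lines. */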
+	add		%r26, %r21, %r22
+	cmpb,COND(>>),n	%r22, %r25, 2f /* predict not taken */
+1:	add		%r22, %r21, %r22
+	fdc,m		%r23(%sr3, %r26)
+	fdc,m		%r23(%sr3, %r26)
+	fdc,m		%r23(%sr3, %r26)
+	fdc,m		%r23(%sr3, %r26)
+	fdc,m		%r23(%sr3, %r26)
+	fdc,m		%r23(%sr3, %r26)
+	fdc,m		%r23(%sr3, %r26)
+	fdc,m		%r23(%sr3, %r26)
+	fdc,m		%r23(%sr3, %r26)
+	fdc,m		%r23(%sr3, %r26)
+	fdc,m		%r23(%sr3, %r26)
+	fdc,m		%r23(%sr3, %r26)
+	fdc,m		%r23(%sr3, %r26)
+	fdc,m		%r23(%sr3, %r26)
+	fdc,m		%r23(%sr3, %r26)
+	cmpb,COND(<<=)	%r22, %r25, 1b /* predict taken */
+	fdc,m		%r23(%sr3, %r26)
+
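+	/* Flush any remaining lines one at a time. */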
+2:	cmpb,COND(>>),n	%r25, %r26, 2b /* predict taken */
 	fdc,m		%r23(%sr3, %r26)
 
 	sync
@@ -1098,7 +1187,33 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
 	ldo		-1(%r23), %r21
 	ANDCM		%r26, %r21, %r26
 
-1:      cmpb,COND(<<),n	%r26, %r25,1b
+#ifdef CONFIG_64BIT
+	depd,z		%r23, 59, 60, %r21
+#else
+	depw,z		%r23, 27, 28, %r21
+#endif
+	add		%r26, %r21, %r22
+	cmpb,COND(>>),n	%r22, %r25, 2f /* predict not taken */
+1:	add		%r22, %r21, %r22
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	fdc,m		%r23(%r26)
+	cmpb,COND(<<=)	%r22, %r25, 1b /* predict taken */
+	fdc,m		%r23(%r26)
+
+2:	cmpb,COND(>>),n	%r25, %r26, 2b /* predict taken */
 	fdc,m		%r23(%r26)
 
 	sync
@@ -1120,7 +1235,33 @@ ENTRY_CFI(purge_kernel_dcache_range_asm)
 	ldo		-1(%r23), %r21
 	ANDCM		%r26, %r21, %r26
 
-1:      cmpb,COND(<<),n	%r26, %r25,1b
+#ifdef CONFIG_64BIT
+	depd,z		%r23, 59, 60, %r21
+#else
+	depw,z		%r23, 27, 28, %r21
+#endif
+	add		%r26, %r21, %r22
+	cmpb,COND(>>),n	%r22, %r25, 2f /* predict not taken */
+1:	add		%r22, %r21, %r22
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	pdc,m		%r23(%r26)
+	cmpb,COND(<<=)	%r22, %r25, 1b /* predict taken */
+	pdc,m		%r23(%r26)
+
+2:	cmpb,COND(>>),n	%r25, %r26, 2b /* predict taken */
 	pdc,m		%r23(%r26)
 
 	sync
@@ -1142,7 +1283,33 @@ ENTRY_CFI(flush_user_icache_range_asm)
 	ldo		-1(%r23), %r21
 	ANDCM		%r26, %r21, %r26
 
-1:      cmpb,COND(<<),n	%r26, %r25,1b
+#ifdef CONFIG_64BIT
+	depd,z		%r23, 59, 60, %r21
+#else
+	depw,z		%r23, 27, 28, %r21
+#endif
+	add		%r26, %r21, %r22
+	cmpb,COND(>>),n	%r22, %r25, 2f /* predict not taken */
+1:	add		%r22, %r21, %r22
+	fic,m		%r23(%sr3, %r26)
+	fic,m		%r23(%sr3, %r26)
+	fic,m		%r23(%sr3, %r26)
+	fic,m		%r23(%sr3, %r26)
+	fic,m		%r23(%sr3, %r26)
+	fic,m		%r23(%sr3, %r26)
+	fic,m		%r23(%sr3, %r26)
+	fic,m		%r23(%sr3, %r26)
+	fic,m		%r23(%sr3, %r26)
+	fic,m		%r23(%sr3, %r26)
+	fic,m		%r23(%sr3, %r26)
+	fic,m		%r23(%sr3, %r26)
+	fic,m		%r23(%sr3, %r26)
+	fic,m		%r23(%sr3, %r26)
+	fic,m		%r23(%sr3, %r26)
+	cmpb,COND(<<=)	%r22, %r25, 1b /* predict taken */
+	fic,m		%r23(%sr3, %r26)
+
+2:	cmpb,COND(>>),n	%r25, %r26, 2b /* predict taken */
 	fic,m		%r23(%sr3, %r26)
 
 	sync
@@ -1185,7 +1352,7 @@ ENTRY_CFI(flush_kernel_icache_page)
 	fic,m		%r23(%sr4, %r26)
 	fic,m		%r23(%sr4, %r26)
 	fic,m		%r23(%sr4, %r26)
-	cmpb,COND(<<)		%r26, %r25, 1b
+	cmpb,COND(>>)	%r25, %r26, 1b /* predict taken */
 	fic,m		%r23(%sr4, %r26)
 
 	sync
@@ -1206,7 +1373,33 @@ ENTRY_CFI(flush_kernel_icache_range_asm)
 	ldo		-1(%r23), %r21
 	ANDCM		%r26, %r21, %r26
 
-1:      cmpb,COND(<<),n	%r26, %r25, 1b
+#ifdef CONFIG_64BIT
+	depd,z		%r23, 59, 60, %r21
+#else
+	depw,z		%r23, 27, 28, %r21
+#endif
+	add		%r26, %r21, %r22
+	cmpb,COND(>>),n	%r22, %r25, 2f /* predict not taken */
+1:	add		%r22, %r21, %r22
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	fic,m		%r23(%sr4, %r26)
+	cmpb,COND(<<=)	%r22, %r25, 1b /* predict taken */
+	fic,m		%r23(%sr4, %r26)
+
+2:	cmpb,COND(>>),n	%r25, %r26, 2b /* predict taken */
 	fic,m		%r23(%sr4, %r26)
 
 	sync
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 5f7e57fcaeef..c7ac3eb439d7 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -633,8 +633,7 @@ cas_action:
 	sub,<>	%r28, %r25, %r0
 2:	stw	%r24, 0(%r26)
 	/* Free lock */
-	sync
-	stw	%r20, 0(%sr2,%r20)
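+	/* Ordered store releases the lock without a prior sync, as in
+	   arch_spin_unlock(). */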
+	stw,ma	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
 	/* Clear thread register indicator */
 	stw	%r0, 4(%sr2,%r20)
@@ -648,8 +647,7 @@ cas_action:
 3:		
 	/* Error occurred on load or store */
 	/* Free lock */
-	sync
-	stw	%r20, 0(%sr2,%r20)
+	stw,ma	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
 	stw	%r0, 4(%sr2,%r20)
 #endif
@@ -850,8 +848,7 @@ cas2_action:
 
 cas2_end:
 	/* Free lock */
-	sync
-	stw	%r20, 0(%sr2,%r20)
+	stw,ma	%r20, 0(%sr2,%r20)
 	/* Enable interrupts */
 	ssm	PSW_SM_I, %r0
 	/* Return to userspace, set no error */
@@ -861,8 +858,7 @@ cas2_end:
 22:
 	/* Error occurred on load or store */
 	/* Free lock */
-	sync
-	stw	%r20, 0(%sr2,%r20)
+	stw,ma	%r20, 0(%sr2,%r20)
 	ssm	PSW_SM_I, %r0
 	ldo	1(%r0),%r28
 	b	lws_exit
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 364e71861bfd..4dfc19500311 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1619,7 +1619,7 @@ static int resp_start_stop(struct scsi_cmnd *scp,
 	}
 	stop = !(cmd[4] & 1);
 	changing = atomic_read(&devip->stopped) == !stop;
-	atomic_xchg(&devip->stopped, stop);
+	(void) atomic_xchg(&devip->stopped, stop);
 	if (!changing || cmd[1] & 0x1)  /* state unchanged or IMMED set */
 		return SDEG_RES_IMMED_MASK;
 	else
