[PATCH] do not use drop_mmu_context to flush another task's icache

c-r4k.c and c-sb1.c use drop_mmu_context() to flush the virtually
tagged icache, but that does not work when flushing another task's
icache.  ptrace() (and copy_to_user_page()) is such a case.  Use an
indexed flush for those cases.

Signed-off-by: Atsushi Nemoto <anemo@xxxxxxxxxxxxx>
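
For context, a minimal user-space sketch (not the kernel code itself) of
the index arithmetic behind an indexed icache flush such as
__sb1_flush_icache_range(): the range is rounded out to whole cache
lines and each line is selected by its index bits alone, so no ASID or
tag match is required, which is why it works for a non-current mm.  The
line size, index mask, and addresses below are hypothetical example
values.

/*
 * Sketch only: models the alignment and index masking done by an
 * indexed icache invalidation.  Constants are hypothetical examples.
 */
#include <stdio.h>

#define ICACHE_LINE_SIZE   32UL             /* hypothetical line size */
#define ICACHE_INDEX_MASK  (0x8000UL - 1)   /* hypothetical 32KB way size */

static void sketch_indexed_flush(unsigned long start, unsigned long end)
{
	/* Round the range out to whole cache lines. */
	start &= ~(ICACHE_LINE_SIZE - 1);
	end = (end + ICACHE_LINE_SIZE - 1) & ~(ICACHE_LINE_SIZE - 1);

	while (start != end) {
		/*
		 * An index op picks a cache line by its index bits only,
		 * so no ASID/tag match (and no drop_mmu_context) is needed.
		 */
		printf("Index_Invalidate_I at index 0x%lx\n",
		       start & ICACHE_INDEX_MASK);
		start += ICACHE_LINE_SIZE;
	}
}

int main(void)
{
	/* Example: invalidate 256 bytes starting at an unaligned address. */
	sketch_indexed_flush(0x00403456UL, 0x00403456UL + 256);
	return 0;
}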

diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 4e14982..2d729f6 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -475,7 +475,7 @@ static inline void local_r4k_flush_cache
 		}
 	}
 	if (exec) {
-		if (cpu_has_vtag_icache) {
+		if (cpu_has_vtag_icache && mm == current->active_mm) {
 			int cpu = smp_processor_id();
 
 			if (cpu_context(cpu, mm) != 0)
@@ -599,7 +599,7 @@ static inline void local_r4k_flush_icach
 	 * We're not sure of the virtual address(es) involved here, so
 	 * we have to flush the entire I-cache.
 	 */
-	if (cpu_has_vtag_icache) {
+	if (cpu_has_vtag_icache && vma->vm_mm == current->active_mm) {
 		int cpu = smp_processor_id();
 
 		if (cpu_context(cpu, vma->vm_mm) != 0)
diff --git a/arch/mips/mm/c-sb1.c b/arch/mips/mm/c-sb1.c
index 4bd9ad8..16bad7c 100644
--- a/arch/mips/mm/c-sb1.c
+++ b/arch/mips/mm/c-sb1.c
@@ -155,6 +155,26 @@ static inline void __sb1_flush_icache_al
 }
 
 /*
+ * Invalidate a range of the icache.  The addresses are virtual, and
+ * the cache is virtually indexed and tagged.  However, we don't
+ * necessarily have the right ASID context, so use index ops instead
+ * of hit ops.
+ */
+static inline void __sb1_flush_icache_range(unsigned long start,
+	unsigned long end)
+{
+	start &= ~(icache_line_size - 1);
+	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);
+
+	while (start != end) {
+		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
+		start += icache_line_size;
+	}
+	mispredict();
+	sync();
+}
+
+/*
  * Flush the icache for a given physical page.  Need to writeback the
  * dcache first, then invalidate the icache.  If the page isn't
  * executable, nothing is required.
@@ -173,8 +193,11 @@ #endif
 	/*
 	 * Bumping the ASID is probably cheaper than the flush ...
 	 */
-	if (cpu_context(cpu, vma->vm_mm) != 0)
-		drop_mmu_context(vma->vm_mm, cpu);
+	if (vma->vm_mm == current->active_mm) {
+		if (cpu_context(cpu, vma->vm_mm) != 0)
+			drop_mmu_context(vma->vm_mm, cpu);
+	} else
+		__sb1_flush_icache_range(addr, addr + PAGE_SIZE);
 }
 
 #ifdef CONFIG_SMP
@@ -210,26 +233,6 @@ void sb1_flush_cache_page(struct vm_area
 	__attribute__((alias("local_sb1_flush_cache_page")));
 #endif
 
-/*
- * Invalidate a range of the icache.  The addresses are virtual, and
- * the cache is virtually indexed and tagged.  However, we don't
- * necessarily have the right ASID context, so use index ops instead
- * of hit ops.
- */
-static inline void __sb1_flush_icache_range(unsigned long start,
-	unsigned long end)
-{
-	start &= ~(icache_line_size - 1);
-	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);
-
-	while (start != end) {
-		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
-		start += icache_line_size;
-	}
-	mispredict();
-	sync();
-}
-
 
 /*
  * Invalidate all caches on this CPU
@@ -326,9 +329,12 @@ #endif
 	 * If there's a context, bump the ASID (cheaper than a flush,
 	 * since we don't know VAs!)
 	 */
-	if (cpu_context(cpu, vma->vm_mm) != 0) {
-		drop_mmu_context(vma->vm_mm, cpu);
-	}
+	if (vma->vm_mm == current->active_mm) {
+		if (cpu_context(cpu, vma->vm_mm) != 0)
+			drop_mmu_context(vma->vm_mm, cpu);
+	} else
+		__sb1_flush_icache_range(start, start + PAGE_SIZE);
+
 }
 
 #ifdef CONFIG_SMP

