This is Peter Horton's IDE patch for the Cobalt, taken from the notes in
Peter's file.
PIO "in" transfers can cause D-cache lines to be allocated
to the data being read. If the target is the page cache then
the kernel can create a user space mapping of the same page
without flushing it from the D-cache. This has large potential
to create cache aliases. The Cobalts seem to trigger this
problem easily.
--
----
Jim Gifford
maillist@xxxxxxxxx
diff -Naur linux-mips-2.6.14.orig/include/asm-mips/cobalt/ide.h linux-mips-2.6.14/include/asm-mips/cobalt/ide.h
--- linux-mips-2.6.14.orig/include/asm-mips/cobalt/ide.h 1969-12-31 16:00:00.000000000 -0800
+++ linux-mips-2.6.14/include/asm-mips/cobalt/ide.h 2005-11-17 14:58:19.000000000 -0800
@@ -0,0 +1,83 @@
+
+/*
+ * PIO "in" transfers can cause D-cache lines to be allocated
+ * to the data being read. If the target is the page cache then
+ * the kernel can create a user space mapping of the same page
+ * without flushing it from the D-cache. This has large potential
+ * to create cache aliases. The Cobalts seem to trigger this
+ * problem easily.
+ *
+ * MIPS doesn't have a flush_dcache_range() so we roll
+ * our own.
+ *
+ * -- pdh
+ */
+
+#define MAX_HWIFS 2
+
+#include <asm/r4kcache.h>
+
+static inline void __flush_dcache(void)
+{
+ unsigned long dc_size, dc_line, addr, end;
+
+ dc_size = current_cpu_data.dcache.ways << current_cpu_data.dcache.waybit;
+ dc_line = current_cpu_data.dcache.linesz;
+
+ addr = CKSEG0;
+ end = addr + dc_size;
+
+ for (; addr < end; addr += dc_line)
+ flush_dcache_line_indexed(addr);
+}
+
+static inline void __flush_dcache_range(unsigned long start, unsigned long end)
+{
+ unsigned long dc_size, dc_line, addr;
+
+ dc_size = current_cpu_data.dcache.ways << current_cpu_data.dcache.waybit;
+ dc_line = current_cpu_data.dcache.linesz;
+
+ addr = start & ~(dc_line - 1);
+ end += dc_line - 1;
+
+ if (end - addr < dc_size)
+ for (; addr < end; addr += dc_line)
+ flush_dcache_line(addr);
+ else
+ __flush_dcache();
+}
+
+static inline void __ide_insw(unsigned long port, void *addr, unsigned int count)
+{
+ insw(port, addr, count);
+
+ __flush_dcache_range((unsigned long) addr, (unsigned long) addr + count * 2);
+}
+
+static inline void __ide_insl(unsigned long port, void *addr, unsigned int count)
+{
+ insl(port, addr, count);
+
+ __flush_dcache_range((unsigned long) addr, (unsigned long) addr + count * 4);
+}
+
+static inline void __ide_mm_insw(volatile void __iomem *port, void *addr, unsigned int count)
+{
+ readsw(port, addr, count);
+
+ __flush_dcache_range((unsigned long) addr, (unsigned long) addr + count * 2);
+}
+
+static inline void __ide_mm_insl(volatile void __iomem *port, void *addr, unsigned int count)
+{
+ readsl(port, addr, count);
+
+ __flush_dcache_range((unsigned long) addr, (unsigned long) addr + count * 4);
+}
+
+#define insw __ide_insw
+#define insl __ide_insl
+
+#define __ide_mm_outsw writesw
+#define __ide_mm_outsl writesl