It's good for me. Acked-by: Yoichi Yuasa <yoichi_yuasa@xxxxxxxxxxxxxx> On Sat, 24 Jan 2009 19:10:55 +0000 Ralf Baechle <ralf@xxxxxxxxxxxxxx> wrote: > On Sat, Jan 24, 2009 at 10:15:42PM +0900, Yoichi Yuasa wrote: > > Patch looks ok - but I think we also have to assume that the starting > address of the range might be misaligned, so how about this patch? > > Ralf > > Signed-off-by: Ralf Baechle <ralf@xxxxxxxxxxxxxx> > > diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c > index 56290a7..c43f4b2 100644 > --- a/arch/mips/mm/c-r4k.c > +++ b/arch/mips/mm/c-r4k.c > @@ -619,8 +619,20 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) > if (size >= scache_size) > r4k_blast_scache(); > else { > - cache_op(Hit_Writeback_Inv_SD, addr); > - cache_op(Hit_Writeback_Inv_SD, addr + size - 1); > + unsigned long lsize = cpu_scache_line_size(); > + unsigned long almask = ~(lsize - 1); > + > + /* > + * There is no clearly documented alignment requirement > + * for the cache instruction on MIPS processors and > + * some processors, among them the RM5200 and RM7000 > + * QED processors will throw an address error for cache > + * hit ops with insufficient alignment. Solved by > + * aligning the address to cache line size. > + */ > + cache_op(Hit_Writeback_Inv_SD, addr & almask); > + cache_op(Hit_Writeback_Inv_SD, > + (addr + size - 1) & almask); > blast_inv_scache_range(addr, addr + size); > } > return; > @@ -629,9 +641,12 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) > if (cpu_has_safe_index_cacheops && size >= dcache_size) { > r4k_blast_dcache(); > } else { > + unsigned long lsize = cpu_dcache_line_size(); > + unsigned long almask = ~(lsize - 1); > + > R4600_HIT_CACHEOP_WAR_IMPL; > - cache_op(Hit_Writeback_Inv_D, addr); > - cache_op(Hit_Writeback_Inv_D, addr + size - 1); > + cache_op(Hit_Writeback_Inv_D, addr & almask); > + cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask); > blast_inv_dcache_range(addr, addr + size); > } >