On Wed, 2009-01-07 at 17:41 -0800, David Daney wrote:
> This is a preliminary patch to allow the kernel to run in mapped
> address space via a wired TLB entry. Probably in a future version I
> would factor out the OCTEON specific parts to a separate patch.

Yes, please do the factoring.

> diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
> index 780b520..d9c46a4 100644
> --- a/arch/mips/Kconfig
> +++ b/arch/mips/Kconfig
> @@ -1431,6 +1431,23 @@ config PAGE_SIZE_64KB
>
>  endchoice
>
> +config MAPPED_KERNEL
> +	bool "Mapped kernel"
> +	help
> +	  Select this option if you want the kernel's code and data to
> +	  be in mapped memory. The kernel will be mapped using a
> +	  single wired TLB entry, thus reducing the number of
> +	  available TLB entries by one. Kernel modules will be able
> +	  to use a more efficient calling convention.

This is currently only supported on 64-bit processors, so this should
depend on CONFIG_64BIT.

> diff --git a/arch/mips/Makefile b/arch/mips/Makefile
> index 0bc2120..5468f6d 100644
> --- a/arch/mips/Makefile
> +++ b/arch/mips/Makefile
...
> @@ -662,7 +670,7 @@ OBJCOPYFLAGS	+= --remove-section=.reginfo
>
>  CPPFLAGS_vmlinux.lds := \
>  	$(KBUILD_CFLAGS) \
> -	-D"LOADADDR=$(load-y)" \
> +	-D"LOADADDR=$(load-y)" $(PHYS_LOAD_ADDRESS) \
>  	-D"JIFFIES=$(JIFFIES)" \
>  	-D"DATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)"

It seems more consistent to just eliminate PHYS_LOAD_ADDRESS entirely
and add a line here reading:

	-D"PHYSADDR=0x$(CONFIG_PHYS_LOAD_ADDRESS)" \

>
> diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
> index 0b2b5eb..bf36d82 100644
> --- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
> +++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
> @@ -27,6 +27,56 @@
>  # a3 = address of boot descriptor block
>  	.set push
>  	.set arch=octeon
> +#ifdef CONFIG_MAPPED_KERNEL
> +	# Set up the TLB index 0 for wired access to kernel.
> +	# Assume we were loaded with sufficient alignment so that we
> +	# can cover the image with two pages.

This seems like a pretty big assumption. Possible ways to handle this:
o Generalize to handle n pages.
o Hang in a loop here if the assumption is not met.
o Check later on whether the assumption was true and print a message.
  I'm not really sure how to do this last one, though.

> +	dla	v0, _end
> +	dla	v1, _text
> +	dsubu	v0, v0, v1	# size of image
> +	move	v1, zero
> +	li	t1, -1		# shift count.
> +1:	dsrl	v0, v0, 1	# mask into v1
> +	dsll	v1, v1, 1
> +	daddiu	t1, t1, 1
> +	ori	v1, v1, 1
> +	bne	v0, zero, 1b
> +	daddiu	t2, t1, -6
> +	mtc0	v1, $5, 0	# PageMask
> +	dla	t3, 0xffffffffc0000000	# kernel address

I think this should be CKSSEG rather than a magic constant.

> +	dmtc0	t3, $10, 0	# EntryHi
> +	bal	1f
> +1:	dla	v0, 0x7fffffff

Another magic constant; don't know if there is already a define that
really applies, though. Perhaps add something to asm-mips/inst.h?

> diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
> index 1055348..b44bcf8 100644
> --- a/arch/mips/kernel/traps.c
> +++ b/arch/mips/kernel/traps.c
> @@ -49,6 +49,8 @@
>  #include <asm/stacktrace.h>
>  #include <asm/irq.h>
>
> +#include "../mm/uasm.h"

This looks like it would be a good idea to consider moving uasm.h to
include/asm-mips, or possibly splitting it into two header files, one of
which would move to include/asm-mips.
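Something like the sketch below is the kind of public half I have in
mind. To be clear, this is only a sketch: the path, the guard name and
the prototypes here are guessed from the uasm_* calls used later in
this patch, not copied from the real arch/mips/mm/uasm.h (which
generates its declarations with macros).

/* include/asm-mips/uasm.h -- hypothetical public subset (sketch only) */
#ifndef _ASM_UASM_H
#define _ASM_UASM_H

#include <linux/types.h>

/* Emit one instruction at *buf and advance the buffer pointer. */
void uasm_i_j(u32 **buf, unsigned int target);
void uasm_i_jr(u32 **buf, unsigned int reg);
void uasm_i_nop(u32 **buf);

/*
 * UASM_i_LA() and friends could stay macros that select the 32-bit or
 * 64-bit load-address sequence.
 */

#endif /* _ASM_UASM_H */

traps.c could then use #include <asm/uasm.h> instead of reaching across
directories into mm/.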
> +
>  extern void check_wait(void);
>  extern asmlinkage void r4k_wait(void);
>  extern asmlinkage void rollback_handle_int(void);
> @@ -1295,9 +1297,18 @@ void *set_except_vector(int n, void *addr)
>
>  	exception_handlers[n] = handler;
>  	if (n == 0 && cpu_has_divec) {
> -		*(u32 *)(ebase + 0x200) = 0x08000000 |
> -			(0x03ffffff & (handler >> 2));
> -		local_flush_icache_range(ebase + 0x200, ebase + 0x204);
> +		unsigned long jump_mask = ~((1 << 28) - 1);

The 28 is a magic constant specifying the number of bits of the offset
in a jump instruction. Perhaps define jump_mask in asm-mips/inst.h
since it is related to the instruction format?

> +		u32 *buf = (u32 *)(ebase + 0x200);
> +		unsigned int k0 = 26;

You are using k0 as a constant by defining it as a variable. You could
just have a #define here, but my suggestion is that it would be better
to add defines to asm-mips/inst.h (something like "#define REG_K0 26"
might be suitable for meeting this particular need).

> +		if((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
> +			uasm_i_j(&buf, handler & jump_mask);
> +			uasm_i_nop(&buf);
> +		} else {
> +			UASM_i_LA(&buf, k0, handler);
> +			uasm_i_jr(&buf, k0);
> +			uasm_i_nop(&buf);
> +		}
> +		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
>  	}
>  	return (void *)old_handler;
>  }
>  /*
> @@ -1670,9 +1683,9 @@ void __init trap_init(void)
>  		return;	/* Already done */
>  #endif
>
> -	if (cpu_has_veic || cpu_has_vint)
> +	if (cpu_has_veic || cpu_has_vint) {
>  		ebase = (unsigned long) alloc_bootmem_low_pages(0x200 + VECTORSPACING*64);
> -	else {
> +	} else {

Checkpatch will complain about this, and it doesn't really add value to
make the change.

> diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
> index 1417c64..0070aa0 100644
> --- a/arch/mips/mm/page.c
> +++ b/arch/mips/mm/page.c
> @@ -687,3 +687,9 @@ void copy_page(void *to, void *from)
>  }
>
>  #endif /* CONFIG_SIBYTE_DMA_PAGEOPS */
> +
> +#ifdef CONFIG_MAPPED_KERNEL
> +/* Initialized so it is not clobbered when .bss is zeroed. */
> +unsigned long phys_to_kernel_offset = 1;
> +unsigned long kernel_image_end = 1;
> +#endif

Clearly there is some magic happening here, but such wizardry needs
more documentation. I can deduce that these must be overwritten before
we get to kernel_entry; who sets these? I don't know for sure what
kernel_image_end is, but I am guessing that it is the physical address
of the end of the kernel. If so, you can eliminate it as a piece of
magic by calculating it at run time as the sum of the address of the
start of the kernel and the size.

--
David VomLehn, dvomlehn@xxxxxxxxx