This commit adds basic MMU support, i.e. the following are not yet
supported:

- DMA cache handling
- remapping memory regions

The current MMU configuration is:

- 4KB granule size
- 3-level lookup (skipping L0)
- 33 bits per virtual address

This is based on the coreboot and u-boot MMU configurations.

Signed-off-by: Raphael Poggi <poggi.raph@xxxxxxxxx>
---
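Some background on the chosen configuration, derived from the macros
added in pgtable64.h below: with a 4KB granule and BITS_PER_VA = 33,
TCR_T0SZ(33) evaluates to 64 - 33 = 31, the table walk starts at
level 1, and a virtual address decomposes as:

  VA[32:30]  level 1 index -> 1GB block or pointer to an L2 table
  VA[29:21]  level 2 index -> 2MB block or pointer to an L3 table
  VA[20:12]  level 3 index -> 4KB page
  VA[11:0]   byte offset within the page or block
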
 arch/arm/cpu/Makefile            |   4 +-
 arch/arm/cpu/mmu.h               |  54 ++++++
 arch/arm/cpu/mmu_64.c            | 333 +++++++++++++++++++++++++++++++++++++++
 arch/arm/include/asm/mmu.h       |  14 +-
 arch/arm/include/asm/pgtable64.h | 140 ++++++++++++++++
 5 files changed, 540 insertions(+), 5 deletions(-)
 create mode 100644 arch/arm/cpu/mmu_64.c
 create mode 100644 arch/arm/include/asm/pgtable64.h

diff --git a/arch/arm/cpu/Makefile b/arch/arm/cpu/Makefile
index 86a4a90..7cf5da7 100644
--- a/arch/arm/cpu/Makefile
+++ b/arch/arm/cpu/Makefile
@@ -24,8 +24,8 @@ endif
 obj-$(CONFIG_CMD_ARM_CPUINFO) += cpuinfo.o
 obj-$(CONFIG_CMD_ARM_MMUINFO) += mmuinfo.o
 obj-$(CONFIG_OFDEVICE) += dtb.o
-obj-$(CONFIG_MMU) += mmu.o cache.o mmu-early.o
-pbl-$(CONFIG_MMU) += mmu-early.o
+obj-$(CONFIG_MMU) += cache.o
+
 ifeq ($(CONFIG_MMU),)
 obj-y += no-mmu.o
 endif
diff --git a/arch/arm/cpu/mmu.h b/arch/arm/cpu/mmu.h
index 79ebc80..186d408 100644
--- a/arch/arm/cpu/mmu.h
+++ b/arch/arm/cpu/mmu.h
@@ -1,6 +1,60 @@
 #ifndef __ARM_MMU_H
 #define __ARM_MMU_H
 
+#ifdef CONFIG_CPU_64v8
+
+#define TCR_FLAGS	(TCR_TG0_4K | \
+			 TCR_SHARED_OUTER | \
+			 TCR_SHARED_INNER | \
+			 TCR_IRGN_WBWA | \
+			 TCR_ORGN_WBWA | \
+			 TCR_T0SZ(BITS_PER_VA))
+
+#ifndef __ASSEMBLY__
+
+static inline void set_ttbr_tcr_mair(int el, uint64_t table, uint64_t tcr, uint64_t attr)
+{
+	asm volatile("dsb sy");
+	if (el == 1) {
+		asm volatile("msr ttbr0_el1, %0" : : "r" (table) : "memory");
+		asm volatile("msr tcr_el1, %0" : : "r" (tcr) : "memory");
+		asm volatile("msr mair_el1, %0" : : "r" (attr) : "memory");
+	} else if (el == 2) {
+		asm volatile("msr ttbr0_el2, %0" : : "r" (table) : "memory");
+		asm volatile("msr tcr_el2, %0" : : "r" (tcr) : "memory");
+		asm volatile("msr mair_el2, %0" : : "r" (attr) : "memory");
+	} else if (el == 3) {
+		asm volatile("msr ttbr0_el3, %0" : : "r" (table) : "memory");
+		asm volatile("msr tcr_el3, %0" : : "r" (tcr) : "memory");
+		asm volatile("msr mair_el3, %0" : : "r" (attr) : "memory");
+	} else {
+		hang();
+	}
+	asm volatile("isb");
+}
+
+static inline uint64_t get_ttbr(int el)
+{
+	uint64_t val;
+
+	if (el == 1) {
+		asm volatile("mrs %0, ttbr0_el1" : "=r" (val));
+	} else if (el == 2) {
+		asm volatile("mrs %0, ttbr0_el2" : "=r" (val));
+	} else if (el == 3) {
+		asm volatile("mrs %0, ttbr0_el3" : "=r" (val));
+	} else {
+		hang();
+	}
+
+	return val;
+}
+
+void mmu_early_enable(uint64_t membase, uint64_t memsize, uint64_t _ttb);
+
+#endif
+
+#endif /* CONFIG_CPU_64v8 */
+
 #ifdef CONFIG_MMU
 void __mmu_cache_on(void);
 void __mmu_cache_off(void);
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
new file mode 100644
index 0000000..256aa7e
--- /dev/null
+++ b/arch/arm/cpu/mmu_64.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2009-2013 Sascha Hauer <s.hauer@xxxxxxxxxxxxxx>, Pengutronix
+ * Copyright (c) 2016 Raphaël Poggi <poggi.raph@xxxxxxxxx>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"mmu: " fmt
+
+#include <common.h>
+#include <dma-dir.h>
+#include <init.h>
+#include <mmu.h>
+#include <errno.h>
+#include <linux/sizes.h>
+#include <asm/memory.h>
+#include <asm/barebox-arm.h>
+#include <asm/system.h>
+#include <asm/cache.h>
+#include <memory.h>
+#include <asm/system_info.h>
+
+#include "mmu.h"
+
+static uint64_t *ttb;
+static int free_idx;
+
+static void arm_mmu_not_initialized_error(void)
+{
+	/*
+	 * This means:
+	 * - one of the MMU functions like dma_alloc_coherent
+	 *   or remap_range is called too early, before the MMU is initialized
+	 * - or the MMU initialization has failed earlier
+	 */
+	panic("MMU not initialized\n");
+}
+
+/*
+ * Do it the simple way for now and invalidate the entire TLB
+ */
+static inline void tlb_invalidate(void)
+{
+	unsigned int el = current_el();
+
+	dsb();
+
+	/* "tlbi alle1" is only accessible from EL2/EL3; at EL1 use vmalle1 */
+	if (el == 1)
+		__asm__ __volatile__("tlbi vmalle1\n\t" : : : "memory");
+	else if (el == 2)
+		__asm__ __volatile__("tlbi alle2\n\t" : : : "memory");
+	else if (el == 3)
+		__asm__ __volatile__("tlbi alle3\n\t" : : : "memory");
+
+	dsb();
+	isb();
+}
+
+static int level2shift(int level)
+{
+	/* The page offset is 12 bits wide, every level translates 9 bits */
+	return 12 + 9 * (3 - level);
+}
+
+static uint64_t level2mask(int level)
+{
+	uint64_t mask = -EINVAL;
+
+	if (level == 1)
+		mask = L1_ADDR_MASK;
+	else if (level == 2)
+		mask = L2_ADDR_MASK;
+	else if (level == 3)
+		mask = L3_ADDR_MASK;
+
+	return mask;
+}
+
+static int pte_type(uint64_t *pte)
+{
+	return *pte & PMD_TYPE_MASK;
+}
+
+static void set_table(uint64_t *pt, uint64_t *table_addr)
+{
+	uint64_t val;
+
+	val = PMD_TYPE_TABLE | (uint64_t)table_addr;
+	*pt = val;
+}
+
+static uint64_t *create_table(void)
+{
+	/*
+	 * Each table occupies exactly one granule. ttb is a uint64_t
+	 * pointer, so index in entries, not in bytes.
+	 */
+	uint64_t *new_table = ttb + free_idx * (GRANULE_SIZE / sizeof(uint64_t));
+
+	/* Mark all entries as invalid */
+	memset(new_table, 0, GRANULE_SIZE);
+
+	free_idx++;
+
+	return new_table;
+}
+
+static uint64_t *get_level_table(uint64_t *pte)
+{
+	uint64_t *table = (uint64_t *)(*pte & XLAT_ADDR_MASK);
+
+	if (pte_type(pte) != PMD_TYPE_TABLE) {
+		table = create_table();
+		set_table(pte, table);
+	}
+
+	return table;
+}
+
+static uint64_t *find_pte(uint64_t addr)
+{
+	uint64_t *pte;
+	uint64_t block_shift;
+	uint64_t idx;
+	int i;
+
+	pte = ttb;
+
+	/* Walk down until a block/page entry or the last level is reached */
+	for (i = 1; i < 4; i++) {
+		block_shift = level2shift(i);
+		idx = (addr & level2mask(i)) >> block_shift;
+		pte += idx;
+
+		if ((pte_type(pte) != PMD_TYPE_TABLE) || (block_shift <= GRANULE_SIZE_SHIFT))
+			break;
+		else
+			pte = (uint64_t *)(*pte & XLAT_ADDR_MASK);
+	}
+
+	return pte;
+}
+
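+/*
+ * Map a physically contiguous region at virt, using the largest
+ * translation size the remaining length and alignment allow: 1GB
+ * blocks at level 1, 2MB blocks at level 2 and 4KB pages at level 3,
+ * creating intermediate tables on demand. For example, mapping 6MB
+ * at 0x40200000 installs three 2MB block entries at level 2, while
+ * mapping 16KB at 0x40001000 falls through to four level 3 page
+ * entries.
+ */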
+static void map_region(uint64_t virt, uint64_t phys, uint64_t size, uint64_t attr)
+{
+	uint64_t block_size;
+	uint64_t block_shift;
+	uint64_t *pte;
+	uint64_t idx;
+	uint64_t addr;
+	uint64_t *table;
+	uint64_t type;
+	int level;
+
+	if (!ttb)
+		arm_mmu_not_initialized_error();
+
+	addr = virt;
+
+	/* The descriptor type bits are chosen per entry below */
+	attr &= ~PMD_TYPE_MASK;
+
+	while (size) {
+		table = ttb;
+		for (level = 1; level < 4; level++) {
+			block_shift = level2shift(level);
+			idx = (addr & level2mask(level)) >> block_shift;
+			block_size = 1ULL << block_shift;
+
+			pte = table + idx;
+
+			/* Level 3 entries are pages, levels 1-2 are blocks */
+			if (level == 3)
+				type = PTE_TYPE_PAGE;
+			else
+				type = PMD_TYPE_SECT;
+
+			if (size >= block_size && IS_ALIGNED(addr, block_size)) {
+				*pte = phys | attr | type;
+				addr += block_size;
+				phys += block_size;
+				size -= block_size;
+				break;
+			}
+
+			table = get_level_table(pte);
+		}
+	}
+}
+
+static void create_sections(uint64_t virt, uint64_t phys, uint64_t size, uint64_t flags)
+{
+	map_region(virt, phys, size, flags);
+}
+
+void *map_io_sections(unsigned long phys, void *_start, size_t size)
+{
+	map_region((uint64_t)_start, phys, (uint64_t)size, UNCACHED_MEM);
+
+	tlb_invalidate();
+	return _start;
+}
+
+int arch_remap_range(void *_start, size_t size, unsigned flags)
+{
+	map_region((uint64_t)_start, (uint64_t)_start, (uint64_t)size, flags);
+
+	return 0;
+}
+
+/*
+ * Prepare the MMU for usage and enable it.
+ */
+static int mmu_init(void)
+{
+	struct memory_bank *bank;
+
+	if (list_empty(&memory_banks))
+		/*
+		 * If you see this it means you have no memory registered.
+		 * This can be done either with arm_add_mem_device() in an
+		 * initcall prior to mmu_initcall or via devicetree in the
+		 * memory node.
+		 */
+		panic("MMU: No memory bank found! Cannot continue\n");
+
+	if (get_cr() & CR_M) {
+		ttb = (uint64_t *)get_ttbr(current_el());
+		if (!request_sdram_region("ttb", (unsigned long)ttb, SZ_16K))
+			/*
+			 * This can mean that:
+			 * - the early MMU code has put the ttb into a place
+			 *   which we don't have inside our available memory
+			 * - somebody else has occupied the ttb region which means
+			 *   the ttb will get corrupted.
+			 */
+			pr_crit("Critical Error: Can't request SDRAM region for ttb at %p\n",
+				ttb);
+	} else {
+		ttb = memalign(0x1000, SZ_16K);
+		free_idx = 1;
+
+		memset(ttb, 0, GRANULE_SIZE);
+
+		set_ttbr_tcr_mair(current_el(), (uint64_t)ttb, TCR_FLAGS, UNCACHED_MEM);
+	}
+
+	pr_debug("ttb: 0x%p\n", ttb);
+
+	/* create an uncached flat mapping for the first granule */
+	create_sections(0, 0, GRANULE_SIZE, UNCACHED_MEM);
+
+	/* map the SDRAM banks cached, using the largest blocks possible */
+	for_each_memory_bank(bank)
+		create_sections(bank->start, bank->start, bank->size, CACHED_MEM);
+
+	return 0;
+}
+mmu_initcall(mmu_init);
+
+void mmu_enable(void)
+{
+	if (!ttb)
+		arm_mmu_not_initialized_error();
+
+	if (!(get_cr() & CR_M)) {
+		isb();
+		set_cr(get_cr() | CR_M | CR_C | CR_I);
+	}
+}
+
+void mmu_disable(void)
+{
+	unsigned int cr;
+
+	if (!ttb)
+		arm_mmu_not_initialized_error();
+
+	cr = get_cr();
+	cr &= ~(CR_M | CR_C | CR_I);
+
+	tlb_invalidate();
+
+	dsb();
+	isb();
+
+	set_cr(cr);
+
+	dsb();
+	isb();
+}
+
+void mmu_early_enable(uint64_t membase, uint64_t memsize, uint64_t _ttb)
+{
+	ttb = (uint64_t *)_ttb;
+
+	memset(ttb, 0, GRANULE_SIZE);
+	free_idx = 1;
+
+	set_ttbr_tcr_mair(current_el(), (uint64_t)ttb, TCR_FLAGS, UNCACHED_MEM);
+
+	create_sections(0, 0, GRANULE_SIZE, UNCACHED_MEM);
+
+	create_sections(membase, membase, memsize, CACHED_MEM);
+
+	isb();
+	set_cr(get_cr() | CR_M);
+}
+
+unsigned long virt_to_phys(volatile void *virt)
+{
+	return (unsigned long)virt;
+}
+
+void *phys_to_virt(unsigned long phys)
+{
+	return (void *)phys;
+}
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 8de6544..f68ab37 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,16 +6,24 @@
 #include <malloc.h>
 #include <xfuncs.h>
 
+#ifdef CONFIG_CPU_64v8
+#include <asm/pgtable64.h>
+
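+/*
+ * Descriptor attribute sets used by the arm64 MMU code: each one
+ * combines a MAIR attribute index with the access flag and the
+ * descriptor type bits. DEV_MEM is meant for device I/O regions,
+ * CACHED_MEM for normal write-back memory and UNCACHED_MEM for
+ * normal non-cacheable memory.
+ */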
+#define DEV_MEM		(PMD_ATTRINDX(MT_DEVICE_nGnRnE) | PMD_SECT_AF | PMD_TYPE_SECT)
+#define CACHED_MEM	(PMD_ATTRINDX(MT_NORMAL) | PMD_SECT_S | PMD_SECT_AF | PMD_TYPE_SECT)
+#define UNCACHED_MEM	(PMD_ATTRINDX(MT_NORMAL_NC) | PMD_SECT_S | PMD_SECT_AF | PMD_TYPE_SECT)
+#else
 #include <asm/pgtable.h>
 
 #define PMD_SECT_DEF_UNCACHED (PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT)
 #define PMD_SECT_DEF_CACHED (PMD_SECT_WB | PMD_SECT_DEF_UNCACHED)
+#endif
+
+
 struct arm_memory;
 
-static inline void mmu_enable(void)
-{
-}
+void mmu_enable(void);
 void mmu_disable(void);
 
 static inline void arm_create_section(unsigned long virt, unsigned long phys,
 				      int size_m, unsigned int flags)
diff --git a/arch/arm/include/asm/pgtable64.h b/arch/arm/include/asm/pgtable64.h
new file mode 100644
index 0000000..20bea5b
--- /dev/null
+++ b/arch/arm/include/asm/pgtable64.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_PGTABLE64_H
+#define __ASM_PGTABLE64_H
+
+#define UL(x)		_AC(x, UL)
+
+#define UNUSED_DESC	0x6EbAAD0BBADbA6E0
+
+#define VA_START	0x0
+#define BITS_PER_VA	33
+
+/* Granule size of 4KB is being used */
+#define GRANULE_SIZE_SHIFT	12
+#define GRANULE_SIZE		(1 << GRANULE_SIZE_SHIFT)
+#define XLAT_ADDR_MASK		((1UL << BITS_PER_VA) - GRANULE_SIZE)
+#define GRANULE_SIZE_MASK	((1 << GRANULE_SIZE_SHIFT) - 1)
+
+#define BITS_RESOLVED_PER_LVL	(GRANULE_SIZE_SHIFT - 3)
+#define L1_ADDR_SHIFT		(GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 2)
+#define L2_ADDR_SHIFT		(GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 1)
+#define L3_ADDR_SHIFT		(GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 0)
+
+#define L1_ADDR_MASK		(((1UL << BITS_RESOLVED_PER_LVL) - 1) << L1_ADDR_SHIFT)
+#define L2_ADDR_MASK		(((1UL << BITS_RESOLVED_PER_LVL) - 1) << L2_ADDR_SHIFT)
+#define L3_ADDR_MASK		(((1UL << BITS_RESOLVED_PER_LVL) - 1) << L3_ADDR_SHIFT)
+
+/*
+ * These macros give the size of the region addressed by each entry of an
+ * xlat table at any given level
+ */
+#define L3_XLAT_SIZE		(1UL << L3_ADDR_SHIFT)
+#define L2_XLAT_SIZE		(1UL << L2_ADDR_SHIFT)
+#define L1_XLAT_SIZE		(1UL << L1_ADDR_SHIFT)
+
+#define GRANULE_MASK		GRANULE_SIZE
+
+/*
+ * Level 2 descriptor (PMD).
+ */
+#define PMD_TYPE_MASK		(3 << 0)
+#define PMD_TYPE_FAULT		(0 << 0)
+#define PMD_TYPE_TABLE		(3 << 0)
+#define PMD_TYPE_SECT		(1 << 0)
+#define PMD_TABLE_BIT		(1 << 1)
+
+/*
+ * Section
+ */
+#define PMD_SECT_VALID		(1 << 0)
+#define PMD_SECT_USER		(1 << 6)	/* AP[1] */
+#define PMD_SECT_RDONLY		(1 << 7)	/* AP[2] */
+#define PMD_SECT_S		(3 << 8)
+#define PMD_SECT_AF		(1 << 10)
+#define PMD_SECT_NG		(1 << 11)
+#define PMD_SECT_CONT		(1UL << 52)
+#define PMD_SECT_PXN		(1UL << 53)
+#define PMD_SECT_UXN		(1UL << 54)
+
+/*
+ * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
+ */
+#define PMD_ATTRINDX(t)		((t) << 2)
+#define PMD_ATTRINDX_MASK	(7 << 2)
+
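+/*
+ * Example: a cached 2MB section descriptor for physical address
+ * 0x40000000, as composed by CACHED_MEM in <asm/mmu.h>, is
+ *
+ *   0x40000000 | PMD_ATTRINDX(MT_NORMAL) | PMD_SECT_S | PMD_SECT_AF | PMD_TYPE_SECT
+ *   = 0x40000000 | 0x10 | 0x300 | 0x400 | 0x1 = 0x40000711
+ */
+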
+/*
+ * Level 3 descriptor (PTE).
+ */
+#define PTE_TYPE_MASK		(3 << 0)
+#define PTE_TYPE_FAULT		(0 << 0)
+#define PTE_TYPE_PAGE		(3 << 0)
+#define PTE_TABLE_BIT		(1 << 1)
+#define PTE_USER		(1 << 6)	/* AP[1] */
+#define PTE_RDONLY		(1 << 7)	/* AP[2] */
+#define PTE_SHARED		(3 << 8)	/* SH[1:0], inner shareable */
+#define PTE_AF			(1 << 10)	/* Access Flag */
+#define PTE_NG			(1 << 11)	/* nG */
+#define PTE_DBM			(1UL << 51)	/* Dirty Bit Management */
+#define PTE_CONT		(1UL << 52)	/* Contiguous range */
+#define PTE_PXN			(1UL << 53)	/* Privileged XN */
+#define PTE_UXN			(1UL << 54)	/* User XN */
+
+/*
+ * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
+ */
+#define PTE_ATTRINDX(t)		((t) << 2)
+#define PTE_ATTRINDX_MASK	(7 << 2)
+
+/*
+ * Memory types available.
+ */
+#define MT_DEVICE_nGnRnE	0
+#define MT_DEVICE_nGnRE		1
+#define MT_DEVICE_GRE		2
+#define MT_NORMAL_NC		3
+#define MT_NORMAL		4
+#define MT_NORMAL_WT		5
+
+/*
+ * TCR flags.
+ */
+#define TCR_T0SZ(x)		((64 - (x)) << 0)
+#define TCR_IRGN_NC		(0 << 8)
+#define TCR_IRGN_WBWA		(1 << 8)
+#define TCR_IRGN_WT		(2 << 8)
+#define TCR_IRGN_WBNWA		(3 << 8)
+#define TCR_IRGN_MASK		(3 << 8)
+#define TCR_ORGN_NC		(0 << 10)
+#define TCR_ORGN_WBWA		(1 << 10)
+#define TCR_ORGN_WT		(2 << 10)
+#define TCR_ORGN_WBNWA		(3 << 10)
+#define TCR_ORGN_MASK		(3 << 10)
+#define TCR_SHARED_NON		(0 << 12)
+#define TCR_SHARED_OUTER	(2 << 12)
+#define TCR_SHARED_INNER	(3 << 12)
+#define TCR_TG0_4K		(0 << 14)
+#define TCR_TG0_64K		(1 << 14)
+#define TCR_TG0_16K		(2 << 14)
+#define TCR_EL1_IPS_BITS	(UL(3) << 32)	/* 42 bits physical address */
+#define TCR_EL2_IPS_BITS	(3 << 16)	/* 42 bits physical address */
+#define TCR_EL3_IPS_BITS	(3 << 16)	/* 42 bits physical address */
+
+#define TCR_EL1_RSVD		(1U << 31)
+#define TCR_EL2_RSVD		(1U << 31 | 1 << 23)
+#define TCR_EL3_RSVD		(1U << 31 | 1 << 23)
+
+#endif
-- 
2.1.0


_______________________________________________
barebox mailing list
barebox@xxxxxxxxxxxxxxxxxxx
http://lists.infradead.org/mailman/listinfo/barebox