On 02/11/2016 21:52, Andrew Jones wrote:
> Ensure we're page aligned, add locking, just return if NULL is
> passed to vfree().
>
> Signed-off-by: Andrew Jones <drjones@xxxxxxxxxx>
> ---
>  lib/x86/asm/page.h |  2 ++
>  lib/x86/vm.c       | 44 +++++++++++++++++++++++++++++++++++++-------
>  2 files changed, 39 insertions(+), 7 deletions(-)
>
> diff --git a/lib/x86/asm/page.h b/lib/x86/asm/page.h
> index 5044a49ab0cc..dd999304f1f0 100644
> --- a/lib/x86/asm/page.h
> +++ b/lib/x86/asm/page.h
> @@ -16,6 +16,8 @@
>
>  #ifndef __ASSEMBLY__
>
> +#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
> +
>  #ifdef __x86_64__
>  #define LARGE_PAGE_SIZE (512 * PAGE_SIZE)
>  #else
> diff --git a/lib/x86/vm.c b/lib/x86/vm.c
> index f7e778b3779c..baea17e7f475 100644
> --- a/lib/x86/vm.c
> +++ b/lib/x86/vm.c
> @@ -1,37 +1,54 @@
>  #include "fwcfg.h"
>  #include "vm.h"
>  #include "libcflat.h"
> +#include "asm/spinlock.h"
>
> +static struct spinlock heap_lock;
> +static struct spinlock vm_lock;
>  static void *free = 0;
>  static void *vfree_top = 0;
>
>  static void free_memory(void *mem, unsigned long size)
>  {
> +	assert(!((unsigned long)mem & ~PAGE_MASK));
> +
> +	spin_lock(&heap_lock);
> +
> +	free = NULL;
> +
>  	while (size >= PAGE_SIZE) {
>  		*(void **)mem = free;
>  		free = mem;
>  		mem += PAGE_SIZE;
>  		size -= PAGE_SIZE;
>  	}
> +
> +	spin_unlock(&heap_lock);
> }
>
> void *alloc_page()
> {
> 	void *p;
>
> +	spin_lock(&heap_lock);
> +
> 	if (!free)
> -		return 0;
> +		return NULL;

You must unlock heap_lock before returning here.

Laurent
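For illustration, a minimal sketch of the fix Laurent is suggesting, assuming the remainder of alloc_page() pops the head of the free list as in the pre-patch code (the rest of the hunk is not quoted above):

	void *alloc_page(void)
	{
		void *p;

		spin_lock(&heap_lock);

		if (!free) {
			/* Release the lock on the error path too. */
			spin_unlock(&heap_lock);
			return NULL;
		}

		/* Pop the first page off the free list. */
		p = free;
		free = *(void **)free;

		spin_unlock(&heap_lock);

		return p;
	}

Alternatively, initializing p to NULL and only popping the list when free is non-NULL would allow a single spin_unlock() before one common exit point, avoiding the duplicated unlock call.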