Re: linux-next: manual merge of the rr tree

On Monday 05 January 2009 14:02:39 Stephen Rothwell wrote:
> Similarly with init/main.c, include/linux/percpu.h,
> include/asm-generic/percpu.h and arch/x86/include/asm/percpu.h (though
> against different commits/trees, of course).

OK, here's the merge as I did it.  I've also attached a tarball of the files
post-merge.

Cheers,
Rusty.

diff --cc arch/x86/include/asm/percpu.h
index 5c0ef60,313b3d6..0000000
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
diff --cc include/asm-generic/percpu.h
index 627b446,1c02250..0000000
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@@ -117,11 -75,10 +117,15 @@@ extern void setup_per_cpu_areas(void)
  #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu_var(var)))
  #define __get_cpu_var(var)			per_cpu_var(var)
  #define __raw_get_cpu_var(var)			per_cpu_var(var)
 +#define read_percpu_var(var)			(0, per_cpu_var(var))
 +#define per_cpu_ptr(ptr, cpu)			(ptr)
 +#define __get_cpu_ptr(ptr)			(ptr)
 +#define __raw_get_cpu_ptr(ptr)			(ptr)
 +#define read_percpu_ptr(ptr)			(0, *(ptr))
+ #ifndef SHIFT_PERCPU_PTR
+ # define SHIFT_PERCPU_PTR(__p, __offset)		(__p)
+ #endif
+ #define per_cpu_offset(x)			0L
  
  #endif	/* SMP */
  
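For anyone re-doing this merge: the UP side above is all identity macros, since with one CPU there's only one copy of each per-cpu object and no offset to shift by.  A standalone sketch of what that means in practice (example_counter and these stub macros are stand-ins for illustration, not part of the patch):

	#include <stdio.h>

	/* UP stubs as in the hunk above: pointer ops are the identity. */
	#define per_cpu_ptr(ptr, cpu)			(ptr)
	#define SHIFT_PERCPU_PTR(__p, __offset)		(__p)

	static int example_counter;	/* hypothetical "per-cpu" variable */

	int main(void)
	{
		/* On UP this is simply &example_counter. */
		int *p = per_cpu_ptr(&example_counter, 0);
		(*p)++;
		printf("counter = %d\n", example_counter);	/* prints 1 */
		return 0;
	}
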
diff --cc include/linux/percpu.h
index dad0070,e1f8708..0000000
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@@ -26,25 -26,46 +26,40 @@@
  
  #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)			\
  	__attribute__((__section__(".data.percpu.page_aligned")))	\
 -	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
 +	PER_CPU_ATTRIBUTES __typeof__(type) __percpu name
+ 
+ #ifdef CONFIG_HAVE_ZERO_BASED_PER_CPU
+ #define DEFINE_PER_CPU_FIRST(type, name)				\
+ 	__attribute__((__section__(".data.percpu.first")))		\
 -	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
++	PER_CPU_ATTRIBUTES __typeof__(type) __percpu name
  #else
+ #define DEFINE_PER_CPU_FIRST(type, name)				\
+ 	DEFINE_PER_CPU(type, name)
+ #endif
+ 
+ #else /* !CONFIG_SMP */
+ 
  #define DEFINE_PER_CPU(type, name)					\
 -	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
 +	PER_CPU_ATTRIBUTES __typeof__(type) __percpu name
  
  #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		      \
  	DEFINE_PER_CPU(type, name)
  
  #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)		      \
  	DEFINE_PER_CPU(type, name)
- #endif
+ 
+ #define DEFINE_PER_CPU_FIRST(type, name)				\
+ 	DEFINE_PER_CPU(type, name)
+ 
+ #endif /* !CONFIG_SMP */
  
 -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
 -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
 +#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
 +#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
  
 -/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
  #ifndef PERCPU_ENOUGH_ROOM
 -#ifdef CONFIG_MODULES
 -#define PERCPU_MODULE_RESERVE	8192
 -#else
 -#define PERCPU_MODULE_RESERVE	0
 -#endif
 +extern unsigned int percpu_reserve;
  
 -#define PERCPU_ENOUGH_ROOM						\
 -	(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
 +#define PERCPU_ENOUGH_ROOM (__per_cpu_end - __per_cpu_start + percpu_reserve)
  #endif	/* PERCPU_ENOUGH_ROOM */
  
  /*
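
Two things worth noting in that hunk: the per_cpu__ name prefix is gone (per-cpu variables are now declared under their own name, with a __percpu marker), so the EXPORT macros reduce to plain EXPORT_SYMBOL; and the compile-time PERCPU_MODULE_RESERVE is replaced by the runtime percpu_reserve.  A toy illustration of the name change, using stand-in macros and hypothetical variables foo/bar (the real expansions also add the section and PER_CPU_ATTRIBUTES):

	#include <stdio.h>

	/* Stand-ins for the old and new DEFINE_PER_CPU expansions: */
	#define OLD_DEFINE_PER_CPU(type, name)	type per_cpu__##name
	#define NEW_DEFINE_PER_CPU(type, name)	type name

	OLD_DEFINE_PER_CPU(int, foo);	/* declares "per_cpu__foo" */
	NEW_DEFINE_PER_CPU(int, bar);	/* declares plain "bar" */

	int main(void)
	{
		per_cpu__foo = 1;	/* old: exports had to paste the prefix */
		bar = 2;		/* new: EXPORT_SYMBOL(bar) works as-is */
		printf("%d %d\n", per_cpu__foo, bar);
		return 0;
	}
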
diff --cc init/main.c
index 8a2d82c,d1c5b8b..0000000
--- a/init/main.c
+++ b/init/main.c
diff --cc kernel/module.c
index 99d1756,9712c52..0000000
--- a/kernel/module.c
+++ b/kernel/module.c
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index fa7f356..e77284f 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -61,7 +61,7 @@ void *__alloc_percpu(unsigned long size, unsigned long align)
 	if (WARN_ON(align > PAGE_SIZE))
 		align = PAGE_SIZE;
 
-	ptr = __per_cpu_start;
+	ptr = __per_cpu_load;
 	for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
 		/* Extra for alignment requirement. */
 		extra = ALIGN((unsigned long)ptr, align) - (unsigned long)ptr;
@@ -107,7 +107,7 @@ EXPORT_SYMBOL_GPL(__alloc_percpu);
 void free_percpu(void *freeme)
 {
 	unsigned int i;
-	void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
+	void *ptr = __per_cpu_load + block_size(pcpu_size[0]);
 
 	if (!freeme)
 		return;
@@ -147,7 +147,7 @@ void __init percpu_alloc_init(void)
 	pcpu_size = kmalloc(sizeof(pcpu_size[0]) * pcpu_num_allocated,
 			    GFP_KERNEL);
 	/* Static in-kernel percpu data (used). */
-	pcpu_size[0] = -(__per_cpu_end-__per_cpu_start);
+	pcpu_size[0] = -__per_cpu_size;
 	/* Free room. */
 	pcpu_size[1] = PERCPU_ENOUGH_ROOM + pcpu_size[0];
 	BUG_ON(pcpu_size[1] < 0);
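
As for the allocpercpu.c hunks: the idea, as I read it, is that once the per-cpu section is linked zero-based (CONFIG_HAVE_ZERO_BASED_PER_CPU above), __per_cpu_start no longer gives the load address of the static per-cpu data, hence the switch to __per_cpu_load (and the linker-provided __per_cpu_size).  The allocator's bookkeeping is unchanged: a negative pcpu_size[] entry marks a block as in use, which is why pcpu_size[0] is stored negated and pcpu_size[1] must come out positive.  A sketch of that sign convention, mirroring the block_size() helper the loop above relies on (the numbers here are made up):

	#include <stdio.h>

	/* Negative size == block in use; block_size() strips the sign. */
	static int block_size(int val)
	{
		return val < 0 ? -val : val;
	}

	int main(void)
	{
		/* e.g. 512 bytes of static data inside 4096 bytes of room */
		int pcpu_size[2] = { -512, 4096 - 512 };
		printf("used=%d free=%d\n",
		       block_size(pcpu_size[0]), block_size(pcpu_size[1]));
		return 0;
	}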

Attachment: files.tar.bz2
Description: application/bzip-compressed-tar

