From: Boqun Feng <boqun.feng@xxxxxxxxx>

Add the __percpu modifier where appropriate in order to:

1. Distinguish pointers to per-cpu structures from pointers to actual
   structures, which improves readability.

2. Prevent sparse from complaining about "different address spaces".

Signed-off-by: Boqun Feng <boqun.feng@xxxxxxxxx>
Signed-off-by: Waiman Long <Waiman.Long@xxxxxxx>
---
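Notes (not part of the commit message): for reviewers unfamiliar with
the annotation, here is a minimal sketch of what sparse checks once
__percpu is in place. The foo names are made up for illustration; this
is not dlock-list code. alloc_percpu() returns a __percpu pointer,
per_cpu_ptr() converts it into a regular pointer for a given CPU, and
mixing the two address spaces without such a helper is what produces
the "different address spaces" warning addressed here:

#include <linux/percpu.h>

struct foo {
	int val;
};

/*
 * Annotated: sparse treats this pointer as living in the per-cpu
 * address space, so it must not be dereferenced or assigned to a
 * plain pointer directly.
 */
static struct foo __percpu *foo_pcpu;

static int foo_setup(void)
{
	int cpu;

	foo_pcpu = alloc_percpu(struct foo);	/* __percpu = __percpu: OK */
	if (!foo_pcpu)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		/* per_cpu_ptr() yields a plain pointer to this CPU's copy. */
		struct foo *f = per_cpu_ptr(foo_pcpu, cpu);

		f->val = 0;
	}

	/*
	 * By contrast, "struct foo *bad = foo_pcpu;" would make sparse
	 * warn: incorrect type in assignment (different address spaces).
	 */
	return 0;
}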
 include/linux/dlock-list.h | 18 ++++++++++--------
 lib/dlock-list.c           |  5 +++--
 2 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/include/linux/dlock-list.h b/include/linux/dlock-list.h
index 43355f8..a8e1fd2 100644
--- a/include/linux/dlock-list.h
+++ b/include/linux/dlock-list.h
@@ -108,7 +108,8 @@ static inline void init_dlock_list_node(struct dlock_list_node *node)
 	node->lockptr = NULL;
 }
 
-static inline void free_dlock_list_head(struct dlock_list_head **pdlock_head)
+static inline void
+free_dlock_list_head(struct dlock_list_head __percpu **pdlock_head)
 {
 	free_percpu(*pdlock_head);
 	*pdlock_head = NULL;
@@ -117,7 +118,7 @@ static inline void free_dlock_list_head(struct dlock_list_head **pdlock_head)
 /*
  * Check if all the per-cpu lists are empty
  */
-static inline bool dlock_list_empty(struct dlock_list_head *dlock_head)
+static inline bool dlock_list_empty(struct dlock_list_head __percpu *dlock_head)
 {
 	int cpu;
 
@@ -134,7 +135,7 @@ static inline bool dlock_list_empty(struct dlock_list_head *dlock_head)
  * Return: true if the entry is found, false if all the lists exhausted
  */
 static __always_inline bool
-__dlock_list_next_cpu(struct dlock_list_head *head,
+__dlock_list_next_cpu(struct dlock_list_head __percpu *head,
 		      struct dlock_list_state *state)
 {
 	if (state->lock)
@@ -172,7 +173,7 @@ next_cpu:
  *
  * Return: true if the next entry is found, false if all the entries iterated
  */
-static inline bool dlock_list_iterate(struct dlock_list_head *head,
+static inline bool dlock_list_iterate(struct dlock_list_head __percpu *head,
 				      struct dlock_list_state *state)
 {
 	/*
@@ -200,8 +201,9 @@ static inline bool dlock_list_iterate(struct dlock_list_head *head,
  *
  * Return: true if the next entry is found, false if all the entries iterated
  */
-static inline bool dlock_list_iterate_safe(struct dlock_list_head *head,
-					   struct dlock_list_state *state)
+static inline bool
+dlock_list_iterate_safe(struct dlock_list_head __percpu *head,
+			struct dlock_list_state *state)
 {
 	/*
 	 * Find next entry
@@ -226,8 +228,8 @@ static inline bool dlock_list_iterate_safe(struct dlock_list_head *head,
 }
 
 extern void dlock_list_add(struct dlock_list_node *node,
-			   struct dlock_list_head *head);
+			   struct dlock_list_head __percpu *head);
 extern void dlock_list_del(struct dlock_list_node *node);
-extern int init_dlock_list_head(struct dlock_list_head **pdlock_head);
+extern int init_dlock_list_head(struct dlock_list_head __percpu **pdlock_head);
 
 #endif /* __LINUX_DLOCK_LIST_H */
diff --git a/lib/dlock-list.c b/lib/dlock-list.c
index 84d4623..e1a1930 100644
--- a/lib/dlock-list.c
+++ b/lib/dlock-list.c
@@ -27,7 +27,7 @@ static struct lock_class_key dlock_list_key;
 /*
  * Initialize the per-cpu list head
  */
-int init_dlock_list_head(struct dlock_list_head **pdlock_head)
+int init_dlock_list_head(struct dlock_list_head __percpu **pdlock_head)
 {
 	struct dlock_list_head *dlock_head;
 	int cpu;
@@ -53,7 +53,8 @@ int init_dlock_list_head(struct dlock_list_head **pdlock_head)
  * function is called. However, deletion may be done by a different CPU.
  * So we still need to use a lock to protect the content of the list.
  */
-void dlock_list_add(struct dlock_list_node *node, struct dlock_list_head *head)
+void dlock_list_add(struct dlock_list_node *node,
+		    struct dlock_list_head __percpu *head)
 {
 	struct dlock_list_head *myhead;
 
-- 
1.7.1