The patch titled
     x86 rwsem: minor cleanups
has been added to the -mm tree.  Its filename is
     x86-rwsem-minor-cleanups.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/SubmitChecklist when testing your code ***

See http://userweb.kernel.org/~akpm/stuff/added-to-mm.txt to find
out what to do about this

The current -mm tree may be found at http://userweb.kernel.org/~akpm/mmotm/

------------------------------------------------------
Subject: x86 rwsem: minor cleanups
From: Michel Lespinasse <walken@xxxxxxxxxx>

Clarified a few comments and made the initialization of %edx/%rdx more
uniform across the __down_write_nested, __up_read and __up_write functions.

Signed-off-by: Michel Lespinasse <walken@xxxxxxxxxx>
Acked-by: David Howells <dhowells@xxxxxxxxxx>
Cc: Mike Waychison <mikew@xxxxxxxxxx>
Cc: Suleiman Souhlal <suleiman@xxxxxxxxxx>
Cc: Ying Han <yinghan@xxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 arch/x86/include/asm/rwsem.h |   16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff -puN arch/x86/include/asm/rwsem.h~x86-rwsem-minor-cleanups arch/x86/include/asm/rwsem.h
--- a/arch/x86/include/asm/rwsem.h~x86-rwsem-minor-cleanups
+++ a/arch/x86/include/asm/rwsem.h
@@ -118,7 +118,7 @@ static inline void __down_read(struct rw
 {
 	asm volatile("# beginning down_read\n\t"
 		     LOCK_PREFIX _ASM_INC "(%1)\n\t"
-		     /* adds 0x00000001, returns the old value */
+		     /* adds 0x00000001 */
 		     " jns 1f\n"
 		     " call call_rwsem_down_read_failed\n"
 		     "1:\n\t"
@@ -156,11 +156,9 @@ static inline int __down_read_trylock(st
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	rwsem_count_t tmp;
-
-	tmp = RWSEM_ACTIVE_WRITE_BIAS;
 	asm volatile("# beginning down_write\n\t"
 		     LOCK_PREFIX " xadd %1,(%2)\n\t"
-		     /* subtract 0x0000ffff, returns the old value */
+		     /* adds 0xffff0001, returns the old value */
 		     " test %1,%1\n\t"
 		     /* was the count 0 before? */
 		     " jz 1f\n"
@@ -168,7 +166,7 @@ static inline void __down_write_nested(s
 		     "1:\n"
 		     "# ending down_write"
 		     : "+m" (sem->count), "=d" (tmp)
-		     : "a" (sem), "1" (tmp)
+		     : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS)
 		     : "memory", "cc");
 }

@@ -195,16 +193,16 @@ static inline int __down_write_trylock(s
  */
 static inline void __up_read(struct rw_semaphore *sem)
 {
-	rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
+	rwsem_count_t tmp;
 	asm volatile("# beginning __up_read\n\t"
 		     LOCK_PREFIX " xadd %1,(%2)\n\t"
 		     /* subtracts 1, returns the old value */
 		     " jns 1f\n\t"
-		     " call call_rwsem_wake\n"
+		     " call call_rwsem_wake\n" /* expects old value in %edx */
 		     "1:\n"
 		     "# ending __up_read\n"
 		     : "+m" (sem->count), "=d" (tmp)
-		     : "a" (sem), "1" (tmp)
+		     : "a" (sem), "1" (-RWSEM_ACTIVE_READ_BIAS)
 		     : "memory", "cc");
 }

@@ -218,7 +216,7 @@ static inline void __up_write(struct rw_
 		     LOCK_PREFIX " xadd %1,(%2)\n\t"
 		     /* substracts 0xffff0001, returns the old value */
 		     " jns 1f\n\t"
-		     " call call_rwsem_wake\n"
+		     " call call_rwsem_wake\n" /* expects old value in %edx */
 		     "1:\n\t"
 		     "# ending __up_write\n"
 		     : "+m" (sem->count), "=d" (tmp)
_
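
For anyone mapping the comments above onto the count layout, here is a small
stand-alone sketch of the 32-bit bias arithmetic.  It is an illustration
only, not part of the patch: the constants mirror the !CONFIG_X86_64 values
in arch/x86/include/asm/rwsem.h, while sim_xadd() and the printf driver are
made up for this example as a user-space stand-in for the LOCK_PREFIX xadd.

/*
 * Illustration of the 32-bit rwsem count bias arithmetic (not kernel code).
 * Constants follow the 32-bit definitions in arch/x86/include/asm/rwsem.h;
 * sim_xadd() is a hypothetical stand-in for the atomic xadd instruction.
 */
#include <stdio.h>
#include <stdint.h>

#define RWSEM_UNLOCKED_VALUE	0x00000000
#define RWSEM_ACTIVE_BIAS	0x00000001
#define RWSEM_ACTIVE_MASK	0x0000ffff
#define RWSEM_WAITING_BIAS	(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/* Add 'delta' to *count and return the old value, as xadd does. */
static int32_t sim_xadd(int32_t *count, int32_t delta)
{
	int32_t old = *count;

	*count += delta;
	return old;
}

int main(void)
{
	int32_t count = RWSEM_UNLOCKED_VALUE;
	int32_t old;

	/* down_write fast path: adds 0xffff0001 and tests the old value. */
	old = sim_xadd(&count, RWSEM_ACTIVE_WRITE_BIAS);
	printf("down_write: old=%#x new=%#x fast path: %s\n",
	       (unsigned)old, (unsigned)count, old == 0 ? "yes" : "no");

	/*
	 * up_write: subtracts 0xffff0001; a negative new count means waiters
	 * are queued, so the jns is not taken and call_rwsem_wake runs.
	 */
	old = sim_xadd(&count, -RWSEM_ACTIVE_WRITE_BIAS);
	printf("up_write:   old=%#x new=%#x wake needed: %s\n",
	       (unsigned)old, (unsigned)count, count < 0 ? "yes" : "no");

	return 0;
}

On an uncontended semaphore this prints old=0 for down_write (the jz fast
path is taken) and a non-negative new count for the matching up_write, so
call_rwsem_wake is skipped.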
*/ " jz 1f\n" @@ -168,7 +166,7 @@ static inline void __down_write_nested(s "1:\n" "# ending down_write" : "+m" (sem->count), "=d" (tmp) - : "a" (sem), "1" (tmp) + : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) : "memory", "cc"); } @@ -195,16 +193,16 @@ static inline int __down_write_trylock(s */ static inline void __up_read(struct rw_semaphore *sem) { - rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS; + rwsem_count_t tmp; asm volatile("# beginning __up_read\n\t" LOCK_PREFIX " xadd %1,(%2)\n\t" /* subtracts 1, returns the old value */ " jns 1f\n\t" - " call call_rwsem_wake\n" + " call call_rwsem_wake\n" /* expects old value in %edx */ "1:\n" "# ending __up_read\n" : "+m" (sem->count), "=d" (tmp) - : "a" (sem), "1" (tmp) + : "a" (sem), "1" (-RWSEM_ACTIVE_READ_BIAS) : "memory", "cc"); } @@ -218,7 +216,7 @@ static inline void __up_write(struct rw_ LOCK_PREFIX " xadd %1,(%2)\n\t" /* substracts 0xffff0001, returns the old value */ " jns 1f\n\t" - " call call_rwsem_wake\n" + " call call_rwsem_wake\n" /* expects old value in %edx */ "1:\n\t" "# ending __up_write\n" : "+m" (sem->count), "=d" (tmp) _ Patches currently in -mm which might be from walken@xxxxxxxxxx are x86-rwsem-stay-on-fast-path-when-count0-in-__up_write.patch x86-rwsem-minor-cleanups.patch rwsem-fully-separate-code-pathes-to-wake-writers-vs-readers.patch rwsem-lighter-active-count-checks-when-waking-up-readers.patch rwsem-let-rwsem_waiting_bias-represent-any-number-of-waiting-threads.patch rwsem-wake-queued-readers-when-writer-blocks-on-active-read-lock.patch rwsem-smaller-wrappers-around-rwsem_down_failed_common.patch -- To unsubscribe from this list: send the line "unsubscribe mm-commits" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html