On 2015/09/21, 2:25 PM, "Mike Rapoport" <mike.rapoport@xxxxxxxxx> wrote:

>Use BIT_ULL() macro instead of long hexadecimal constants with only
>single bit set.
>
>Signed-off-by: Mike Rapoport <mike.rapoport@xxxxxxxxx>
>---
> .../lustre/lustre/include/lustre_dlm_flags.h | 80 +++++++++++-----------
> 1 file changed, 40 insertions(+), 40 deletions(-)
>
>diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
>index d27bdae0..a247cbf 100644
>--- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
>+++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
>@@ -50,7 +50,7 @@
> #define LDLM_FL_ON_WIRE_MASK 0x00000000C08F932FULL
>
> /** extent, mode, or resource changed */
>-#define LDLM_FL_LOCK_CHANGED 0x0000000000000001ULL // bit 0
>+#define LDLM_FL_LOCK_CHANGED BIT_ULL(0)

IMHO, this is a step backward in usability.  When I see a debug log that
prints these lock flags in hex, for example 0x200081220, it is easy to
compare that value against the hex constants and see that it is
FL_SKIPPED, FL_TEST_LOCK, FL_HAS_INTENT, FL_INTENT_ONLY, FL_AST_SENT
(see the small decode sketch at the end of this message).  If the flags
are declared as "BIT_ULL(33)", that is not very useful for a human
actually looking at this code.

I don't see any benefit to changing these declarations to use BIT_ULL().

> #define ldlm_is_lock_changed(_l) LDLM_TEST_FLAG((_l), 1ULL << 0)
> #define ldlm_set_lock_changed(_l) LDLM_SET_FLAG((_l), 1ULL << 0)
> #define ldlm_clear_lock_changed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 0)
>@@ -58,7 +58,7 @@
> /**
> * Server placed lock on granted list, or a recovering client wants the
> * lock added to the granted list, no questions asked. */
>-#define LDLM_FL_BLOCK_GRANTED 0x0000000000000002ULL // bit 1
>+#define LDLM_FL_BLOCK_GRANTED BIT_ULL(1)
> #define ldlm_is_block_granted(_l) LDLM_TEST_FLAG((_l), 1ULL << 1)
> #define ldlm_set_block_granted(_l) LDLM_SET_FLAG((_l), 1ULL << 1)
> #define ldlm_clear_block_granted(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 1)
>@@ -66,7 +66,7 @@
> /**
> * Server placed lock on conv list, or a recovering client wants the lock
> * added to the conv list, no questions asked. */
>-#define LDLM_FL_BLOCK_CONV 0x0000000000000004ULL // bit 2
>+#define LDLM_FL_BLOCK_CONV BIT_ULL(2)
> #define ldlm_is_block_conv(_l) LDLM_TEST_FLAG((_l), 1ULL << 2)
> #define ldlm_set_block_conv(_l) LDLM_SET_FLAG((_l), 1ULL << 2)
> #define ldlm_clear_block_conv(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 2)
>@@ -74,13 +74,13 @@
> /**
> * Server placed lock on wait list, or a recovering client wants the lock
> * added to the wait list, no questions asked. */
>-#define LDLM_FL_BLOCK_WAIT 0x0000000000000008ULL // bit 3
>+#define LDLM_FL_BLOCK_WAIT BIT_ULL(3)
> #define ldlm_is_block_wait(_l) LDLM_TEST_FLAG((_l), 1ULL << 3)
> #define ldlm_set_block_wait(_l) LDLM_SET_FLAG((_l), 1ULL << 3)
> #define ldlm_clear_block_wait(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 3)
>
> /** blocking or cancel packet was queued for sending. */
>-#define LDLM_FL_AST_SENT 0x0000000000000020ULL // bit 5
>+#define LDLM_FL_AST_SENT BIT_ULL(5)
> #define ldlm_is_ast_sent(_l) LDLM_TEST_FLAG((_l), 1ULL << 5)
> #define ldlm_set_ast_sent(_l) LDLM_SET_FLAG((_l), 1ULL << 5)
> #define ldlm_clear_ast_sent(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 5)
>@@ -88,37 +88,37 @@
> /**
> * Lock is being replayed. This could probably be implied by the fact that
> * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous.
> */
>-#define LDLM_FL_REPLAY 0x0000000000000100ULL // bit 8
>+#define LDLM_FL_REPLAY BIT_ULL(8)
> #define ldlm_is_replay(_l) LDLM_TEST_FLAG((_l), 1ULL << 8)
> #define ldlm_set_replay(_l) LDLM_SET_FLAG((_l), 1ULL << 8)
> #define ldlm_clear_replay(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 8)
>
> /** Don't grant lock, just do intent. */
>-#define LDLM_FL_INTENT_ONLY 0x0000000000000200ULL // bit 9
>+#define LDLM_FL_INTENT_ONLY BIT_ULL(9)
> #define ldlm_is_intent_only(_l) LDLM_TEST_FLAG((_l), 1ULL << 9)
> #define ldlm_set_intent_only(_l) LDLM_SET_FLAG((_l), 1ULL << 9)
> #define ldlm_clear_intent_only(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 9)
>
> /** lock request has intent */
>-#define LDLM_FL_HAS_INTENT 0x0000000000001000ULL // bit 12
>+#define LDLM_FL_HAS_INTENT BIT_ULL(12)
> #define ldlm_is_has_intent(_l) LDLM_TEST_FLAG((_l), 1ULL << 12)
> #define ldlm_set_has_intent(_l) LDLM_SET_FLAG((_l), 1ULL << 12)
> #define ldlm_clear_has_intent(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 12)
>
> /** flock deadlock detected */
>-#define LDLM_FL_FLOCK_DEADLOCK 0x0000000000008000ULL /* bit 15 */
>+#define LDLM_FL_FLOCK_DEADLOCK BIT_ULL(15)
> #define ldlm_is_flock_deadlock(_l) LDLM_TEST_FLAG((_l), 1ULL << 15)
> #define ldlm_set_flock_deadlock(_l) LDLM_SET_FLAG((_l), 1ULL << 15)
> #define ldlm_clear_flock_deadlock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 15)
>
> /** discard (no writeback) on cancel */
>-#define LDLM_FL_DISCARD_DATA 0x0000000000010000ULL // bit 16
>+#define LDLM_FL_DISCARD_DATA BIT_ULL(16)
> #define ldlm_is_discard_data(_l) LDLM_TEST_FLAG((_l), 1ULL << 16)
> #define ldlm_set_discard_data(_l) LDLM_SET_FLAG((_l), 1ULL << 16)
> #define ldlm_clear_discard_data(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 16)
>
> /** Blocked by group lock - wait indefinitely */
>-#define LDLM_FL_NO_TIMEOUT 0x0000000000020000ULL // bit 17
>+#define LDLM_FL_NO_TIMEOUT BIT_ULL(17)
> #define ldlm_is_no_timeout(_l) LDLM_TEST_FLAG((_l), 1ULL << 17)
> #define ldlm_set_no_timeout(_l) LDLM_SET_FLAG((_l), 1ULL << 17)
> #define ldlm_clear_no_timeout(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 17)
>@@ -126,13 +126,13 @@
> /**
> * Server told not to wait if blocked. For AGL, OST will not send glimpse
> * callback. */
>-#define LDLM_FL_BLOCK_NOWAIT 0x0000000000040000ULL // bit 18
>+#define LDLM_FL_BLOCK_NOWAIT BIT_ULL(18)
> #define ldlm_is_block_nowait(_l) LDLM_TEST_FLAG((_l), 1ULL << 18)
> #define ldlm_set_block_nowait(_l) LDLM_SET_FLAG((_l), 1ULL << 18)
> #define ldlm_clear_block_nowait(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 18)
>
> /** return blocking lock */
>-#define LDLM_FL_TEST_LOCK 0x0000000000080000ULL // bit 19
>+#define LDLM_FL_TEST_LOCK BIT_ULL(19)
> #define ldlm_is_test_lock(_l) LDLM_TEST_FLAG((_l), 1ULL << 19)
> #define ldlm_set_test_lock(_l) LDLM_SET_FLAG((_l), 1ULL << 19)
> #define ldlm_clear_test_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 19)
>@@ -142,14 +142,14 @@
> * cancel notification to original lock holder, but expect no reply. This
> * is for clients (like liblustre) that cannot be expected to reliably
> * response to blocking AST.
> */
>-#define LDLM_FL_CANCEL_ON_BLOCK 0x0000000000800000ULL // bit 23
>+#define LDLM_FL_CANCEL_ON_BLOCK BIT_ULL(23)
> #define ldlm_is_cancel_on_block(_l) LDLM_TEST_FLAG((_l), 1ULL << 23)
> #define ldlm_set_cancel_on_block(_l) LDLM_SET_FLAG((_l), 1ULL << 23)
> #define ldlm_clear_cancel_on_block(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 23)
>
> /**
> * measure lock contention and return -EUSERS if locking contention is high */
>-#define LDLM_FL_DENY_ON_CONTENTION 0x0000000040000000ULL // bit 30
>+#define LDLM_FL_DENY_ON_CONTENTION BIT_ULL(30)
> #define ldlm_is_deny_on_contention(_l) LDLM_TEST_FLAG((_l), 1ULL << 30)
> #define ldlm_set_deny_on_contention(_l) LDLM_SET_FLAG((_l), 1ULL << 30)
> #define ldlm_clear_deny_on_contention(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 30)
>@@ -157,7 +157,7 @@
> /**
> * These are flags that are mapped into the flags and ASTs of blocking
> * locks Add FL_DISCARD to blocking ASTs */
>-#define LDLM_FL_AST_DISCARD_DATA 0x0000000080000000ULL // bit 31
>+#define LDLM_FL_AST_DISCARD_DATA BIT_ULL(31)
> #define ldlm_is_ast_discard_data(_l) LDLM_TEST_FLAG((_l), 1ULL << 31)
> #define ldlm_set_ast_discard_data(_l) LDLM_SET_FLAG((_l), 1ULL << 31)
> #define ldlm_clear_ast_discard_data(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 31)
>@@ -165,7 +165,7 @@
> /**
> * Used for marking lock as a target for -EINTR while cp_ast sleep emulation
> * + race with upcoming bl_ast. */
>-#define LDLM_FL_FAIL_LOC 0x0000000100000000ULL // bit 32
>+#define LDLM_FL_FAIL_LOC BIT_ULL(32)
> #define ldlm_is_fail_loc(_l) LDLM_TEST_FLAG((_l), 1ULL << 32)
> #define ldlm_set_fail_loc(_l) LDLM_SET_FLAG((_l), 1ULL << 32)
> #define ldlm_clear_fail_loc(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 32)
>@@ -173,49 +173,49 @@
> /**
> * Used while processing the unused list to know that we have already
> * handled this lock and decided to skip it.
> */
>-#define LDLM_FL_SKIPPED 0x0000000200000000ULL // bit 33
>+#define LDLM_FL_SKIPPED BIT_ULL(33)
> #define ldlm_is_skipped(_l) LDLM_TEST_FLAG((_l), 1ULL << 33)
> #define ldlm_set_skipped(_l) LDLM_SET_FLAG((_l), 1ULL << 33)
> #define ldlm_clear_skipped(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 33)
>
> /** this lock is being destroyed */
>-#define LDLM_FL_CBPENDING 0x0000000400000000ULL // bit 34
>+#define LDLM_FL_CBPENDING BIT_ULL(34)
> #define ldlm_is_cbpending(_l) LDLM_TEST_FLAG((_l), 1ULL << 34)
> #define ldlm_set_cbpending(_l) LDLM_SET_FLAG((_l), 1ULL << 34)
> #define ldlm_clear_cbpending(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 34)
>
> /** not a real flag, not saved in lock */
>-#define LDLM_FL_WAIT_NOREPROC 0x0000000800000000ULL // bit 35
>+#define LDLM_FL_WAIT_NOREPROC BIT_ULL(35)
> #define ldlm_is_wait_noreproc(_l) LDLM_TEST_FLAG((_l), 1ULL << 35)
> #define ldlm_set_wait_noreproc(_l) LDLM_SET_FLAG((_l), 1ULL << 35)
> #define ldlm_clear_wait_noreproc(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 35)
>
> /** cancellation callback already run */
>-#define LDLM_FL_CANCEL 0x0000001000000000ULL // bit 36
>+#define LDLM_FL_CANCEL BIT_ULL(36)
> #define ldlm_is_cancel(_l) LDLM_TEST_FLAG((_l), 1ULL << 36)
> #define ldlm_set_cancel(_l) LDLM_SET_FLAG((_l), 1ULL << 36)
> #define ldlm_clear_cancel(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 36)
>
> /** whatever it might mean */
>-#define LDLM_FL_LOCAL_ONLY 0x0000002000000000ULL // bit 37
>+#define LDLM_FL_LOCAL_ONLY BIT_ULL(37)
> #define ldlm_is_local_only(_l) LDLM_TEST_FLAG((_l), 1ULL << 37)
> #define ldlm_set_local_only(_l) LDLM_SET_FLAG((_l), 1ULL << 37)
> #define ldlm_clear_local_only(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 37)
>
> /** don't run the cancel callback under ldlm_cli_cancel_unused */
>-#define LDLM_FL_FAILED 0x0000004000000000ULL // bit 38
>+#define LDLM_FL_FAILED BIT_ULL(38)
> #define ldlm_is_failed(_l) LDLM_TEST_FLAG((_l), 1ULL << 38)
> #define ldlm_set_failed(_l) LDLM_SET_FLAG((_l), 1ULL << 38)
> #define ldlm_clear_failed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 38)
>
> /** lock cancel has already been sent */
>-#define LDLM_FL_CANCELING 0x0000008000000000ULL // bit 39
>+#define LDLM_FL_CANCELING BIT_ULL(39)
> #define ldlm_is_canceling(_l) LDLM_TEST_FLAG((_l), 1ULL << 39)
> #define ldlm_set_canceling(_l) LDLM_SET_FLAG((_l), 1ULL << 39)
> #define ldlm_clear_canceling(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 39)
>
> /** local lock (ie, no srv/cli split) */
>-#define LDLM_FL_LOCAL 0x0000010000000000ULL // bit 40
>+#define LDLM_FL_LOCAL BIT_ULL(40)
> #define ldlm_is_local(_l) LDLM_TEST_FLAG((_l), 1ULL << 40)
> #define ldlm_set_local(_l) LDLM_SET_FLAG((_l), 1ULL << 40)
> #define ldlm_clear_local(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 40)
>@@ -232,7 +232,7 @@
> * which can be replaced with a LVB-aware wrapping function for OSC locks.
> * That change is pretty high-risk, though, and would need a lot more
> * testing. */
>-#define LDLM_FL_LVB_READY 0x0000020000000000ULL // bit 41
>+#define LDLM_FL_LVB_READY BIT_ULL(41)
> #define ldlm_is_lvb_ready(_l) LDLM_TEST_FLAG((_l), 1ULL << 41)
> #define ldlm_set_lvb_ready(_l) LDLM_SET_FLAG((_l), 1ULL << 41)
> #define ldlm_clear_lvb_ready(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 41)
>@@ -244,19 +244,19 @@
> * Threads racing to update the KMS after performing their writeback need
> * to know to exclude each other's locks from the calculation as they walk
> * the granted list.
> */
>-#define LDLM_FL_KMS_IGNORE 0x0000040000000000ULL // bit 42
>+#define LDLM_FL_KMS_IGNORE BIT_ULL(42)
> #define ldlm_is_kms_ignore(_l) LDLM_TEST_FLAG((_l), 1ULL << 42)
> #define ldlm_set_kms_ignore(_l) LDLM_SET_FLAG((_l), 1ULL << 42)
> #define ldlm_clear_kms_ignore(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 42)
>
> /** completion AST to be executed */
>-#define LDLM_FL_CP_REQD 0x0000080000000000ULL // bit 43
>+#define LDLM_FL_CP_REQD BIT_ULL(43)
> #define ldlm_is_cp_reqd(_l) LDLM_TEST_FLAG((_l), 1ULL << 43)
> #define ldlm_set_cp_reqd(_l) LDLM_SET_FLAG((_l), 1ULL << 43)
> #define ldlm_clear_cp_reqd(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 43)
>
> /** cleanup_resource has already handled the lock */
>-#define LDLM_FL_CLEANED 0x0000100000000000ULL // bit 44
>+#define LDLM_FL_CLEANED BIT_ULL(44)
> #define ldlm_is_cleaned(_l) LDLM_TEST_FLAG((_l), 1ULL << 44)
> #define ldlm_set_cleaned(_l) LDLM_SET_FLAG((_l), 1ULL << 44)
> #define ldlm_clear_cleaned(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 44)
>@@ -264,7 +264,7 @@
> /**
> * optimization hint: LDLM can run blocking callback from current context
> * w/o involving separate thread. in order to decrease cs rate */
>-#define LDLM_FL_ATOMIC_CB 0x0000200000000000ULL // bit 45
>+#define LDLM_FL_ATOMIC_CB BIT_ULL(45)
> #define ldlm_is_atomic_cb(_l) LDLM_TEST_FLAG((_l), 1ULL << 45)
> #define ldlm_set_atomic_cb(_l) LDLM_SET_FLAG((_l), 1ULL << 45)
> #define ldlm_clear_atomic_cb(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 45)
>@@ -281,13 +281,13 @@
> * dropped to let ldlm_callback_handler() return EINVAL to the server. It
> * is used when ELC RPC is already prepared and is waiting for rpc_lock,
> * too late to send a separate CANCEL RPC. */
>-#define LDLM_FL_BL_AST 0x0000400000000000ULL // bit 46
>+#define LDLM_FL_BL_AST BIT_ULL(46)
> #define ldlm_is_bl_ast(_l) LDLM_TEST_FLAG((_l), 1ULL << 46)
> #define ldlm_set_bl_ast(_l) LDLM_SET_FLAG((_l), 1ULL << 46)
> #define ldlm_clear_bl_ast(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 46)
>
> /** whatever it might mean */
>-#define LDLM_FL_BL_DONE 0x0000800000000000ULL // bit 47
>+#define LDLM_FL_BL_DONE BIT_ULL(47)
> #define ldlm_is_bl_done(_l) LDLM_TEST_FLAG((_l), 1ULL << 47)
> #define ldlm_set_bl_done(_l) LDLM_SET_FLAG((_l), 1ULL << 47)
> #define ldlm_clear_bl_done(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 47)
>@@ -296,7 +296,7 @@
> * Don't put lock into the LRU list, so that it is not canceled due
> * to aging. Used by MGC locks, they are cancelled only at unmount or
> * by callback. */
>-#define LDLM_FL_NO_LRU 0x0001000000000000ULL // bit 48
>+#define LDLM_FL_NO_LRU BIT_ULL(48)
> #define ldlm_is_no_lru(_l) LDLM_TEST_FLAG((_l), 1ULL << 48)
> #define ldlm_set_no_lru(_l) LDLM_SET_FLAG((_l), 1ULL << 48)
> #define ldlm_clear_no_lru(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 48)
>@@ -305,7 +305,7 @@
> * Set for locks that failed and where the server has been notified.
> *
> * Protected by lock and resource locks. */
>-#define LDLM_FL_FAIL_NOTIFIED 0x0002000000000000ULL // bit 49
>+#define LDLM_FL_FAIL_NOTIFIED BIT_ULL(49)
> #define ldlm_is_fail_notified(_l) LDLM_TEST_FLAG((_l), 1ULL << 49)
> #define ldlm_set_fail_notified(_l) LDLM_SET_FLAG((_l), 1ULL << 49)
> #define ldlm_clear_fail_notified(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 49)
>@@ -316,13 +316,13 @@
> * ldlm_lock_destroy_internal().
> *
> * Protected by lock and resource locks.
> */
>-#define LDLM_FL_DESTROYED 0x0004000000000000ULL // bit 50
>+#define LDLM_FL_DESTROYED BIT_ULL(50)
> #define ldlm_is_destroyed(_l) LDLM_TEST_FLAG((_l), 1ULL << 50)
> #define ldlm_set_destroyed(_l) LDLM_SET_FLAG((_l), 1ULL << 50)
> #define ldlm_clear_destroyed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 50)
>
> /** flag whether this is a server namespace lock */
>-#define LDLM_FL_SERVER_LOCK 0x0008000000000000ULL // bit 51
>+#define LDLM_FL_SERVER_LOCK BIT_ULL(51)
> #define ldlm_is_server_lock(_l) LDLM_TEST_FLAG((_l), 1ULL << 51)
> #define ldlm_set_server_lock(_l) LDLM_SET_FLAG((_l), 1ULL << 51)
> #define ldlm_clear_server_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 51)
>@@ -334,7 +334,7 @@
> * Also, spin_is_locked() is deprecated for kernel code; one reason is
> * because it works only for SMP so user needs to add extra macros like
> * LASSERT_SPIN_LOCKED for uniprocessor kernels. */
>-#define LDLM_FL_RES_LOCKED 0x0010000000000000ULL // bit 52
>+#define LDLM_FL_RES_LOCKED BIT_ULL(52)
> #define ldlm_is_res_locked(_l) LDLM_TEST_FLAG((_l), 1ULL << 52)
> #define ldlm_set_res_locked(_l) LDLM_SET_FLAG((_l), 1ULL << 52)
> #define ldlm_clear_res_locked(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 52)
>@@ -344,19 +344,19 @@
> * lock-timeout timer and it will never be reset.
> *
> * Protected by lock and resource locks. */
>-#define LDLM_FL_WAITED 0x0020000000000000ULL // bit 53
>+#define LDLM_FL_WAITED BIT_ULL(53)
> #define ldlm_is_waited(_l) LDLM_TEST_FLAG((_l), 1ULL << 53)
> #define ldlm_set_waited(_l) LDLM_SET_FLAG((_l), 1ULL << 53)
> #define ldlm_clear_waited(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 53)
>
> /** Flag whether this is a server namespace lock. */
>-#define LDLM_FL_NS_SRV 0x0040000000000000ULL // bit 54
>+#define LDLM_FL_NS_SRV BIT_ULL(54)
> #define ldlm_is_ns_srv(_l) LDLM_TEST_FLAG((_l), 1ULL << 54)
> #define ldlm_set_ns_srv(_l) LDLM_SET_FLAG((_l), 1ULL << 54)
> #define ldlm_clear_ns_srv(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 54)
>
> /** Flag whether this lock can be reused. Used by exclusive open. */
>-#define LDLM_FL_EXCL 0x0080000000000000ULL /* bit 55 */
>+#define LDLM_FL_EXCL BIT_ULL(55)
> #define ldlm_is_excl(_l) LDLM_TEST_FLAG((_l), 1ULL << 55)
> #define ldlm_set_excl(_l) LDLM_SET_FLAG((_l), 1ULL << 55)
> #define ldlm_clear_excl(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 55)
>--
>2.1.0
>
>

Cheers, Andreas
--
Andreas Dilger
Lustre Software Architect
Intel High Performance Data Division
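
As a minimal, self-contained sketch of the decode step referenced above (not
part of the patch: BIT_ULL() is re-defined locally only so the two spellings
can be compared, just a handful of the flags from the header are reproduced,
and the flags value is a hypothetical log example):

/*
 * Hypothetical user-space sketch: decode a lock-flags word from a debug
 * log against a handful of the LDLM flag constants quoted above.
 */
#include <stdio.h>

#define BIT_ULL(nr)	(1ULL << (nr))	/* local stand-in for comparison */

#define LDLM_FL_AST_SENT	0x0000000000000020ULL	/* == BIT_ULL(5)  */
#define LDLM_FL_INTENT_ONLY	0x0000000000000200ULL	/* == BIT_ULL(9)  */
#define LDLM_FL_HAS_INTENT	0x0000000000001000ULL	/* == BIT_ULL(12) */
#define LDLM_FL_TEST_LOCK	0x0000000000080000ULL	/* == BIT_ULL(19) */
#define LDLM_FL_SKIPPED		0x0000000200000000ULL	/* == BIT_ULL(33) */

static const struct {
	unsigned long long	flag;
	const char		*name;
} ldlm_flag_names[] = {
	{ LDLM_FL_AST_SENT,	"LDLM_FL_AST_SENT"    },
	{ LDLM_FL_INTENT_ONLY,	"LDLM_FL_INTENT_ONLY" },
	{ LDLM_FL_HAS_INTENT,	"LDLM_FL_HAS_INTENT"  },
	{ LDLM_FL_TEST_LOCK,	"LDLM_FL_TEST_LOCK"   },
	{ LDLM_FL_SKIPPED,	"LDLM_FL_SKIPPED"     },
};

int main(void)
{
	/* hypothetical flags value as it might appear in a debug log */
	unsigned long long flags = 0x200081220ULL;
	size_t i;

	printf("flags 0x%llx:\n", flags);
	for (i = 0; i < sizeof(ldlm_flag_names) / sizeof(ldlm_flag_names[0]); i++)
		if (flags & ldlm_flag_names[i].flag)
			printf("  %s\n", ldlm_flag_names[i].name);

	return 0;
}

Either spelling produces the same constant (BIT_ULL(19) is exactly
0x0000000000080000ULL), so the disagreement is only about which form is
easier to match by eye against hex output in the logs.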