To increase mm->mm_lock_seq robustness, switch it from int to long, so
that it's a 64-bit counter on 64-bit systems and we can stop worrying
about it wrapping around in just ~4 billion iterations. Same goes for
VMA's matching vm_lock_seq, which is derived from mm_lock_seq.

I didn't use __u64 outright to keep 32-bit architectures unaffected,
but if it seems important enough, I have nothing against using __u64.

Suggested-by: Jann Horn <jannh@xxxxxxxxxx>
Signed-off-by: Andrii Nakryiko <andrii@xxxxxxxxxx>
---
 include/linux/mm.h        | 6 +++---
 include/linux/mm_types.h  | 4 ++--
 include/linux/mmap_lock.h | 8 ++++----
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index ecf63d2b0582..97819437832e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -730,7 +730,7 @@ static inline void vma_end_read(struct vm_area_struct *vma)
 }
 
 /* WARNING! Can only be used if mmap_lock is expected to be write-locked */
-static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
+static bool __is_vma_write_locked(struct vm_area_struct *vma, long *mm_lock_seq)
 {
 	mmap_assert_write_locked(vma->vm_mm);
 
@@ -749,7 +749,7 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
  */
 static inline void vma_start_write(struct vm_area_struct *vma)
 {
-	int mm_lock_seq;
+	long mm_lock_seq;
 
 	if (__is_vma_write_locked(vma, &mm_lock_seq))
 		return;
@@ -767,7 +767,7 @@ static inline void vma_start_write(struct vm_area_struct *vma)
 
 static inline void vma_assert_write_locked(struct vm_area_struct *vma)
 {
-	int mm_lock_seq;
+	long mm_lock_seq;
 
 	VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
 }
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5d8cdebd42bc..0dc57d6cfe38 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -715,7 +715,7 @@ struct vm_area_struct {
 	 * counter reuse can only lead to occasional unnecessary use of the
 	 * slowpath.
 	 */
-	int vm_lock_seq;
+	long vm_lock_seq;
 	/* Unstable RCU readers are allowed to read this. */
 	struct vma_lock *vm_lock;
 #endif
@@ -898,7 +898,7 @@ struct mm_struct {
 		 * Can be read with ACQUIRE semantics if not holding write
 		 * mmap_lock.
 		 */
-		int mm_lock_seq;
+		long mm_lock_seq;
 #endif
 
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 9d23635bc701..f8fd6d879aa9 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -105,7 +105,7 @@ static inline void inc_mm_lock_seq(struct mm_struct *mm, bool acquire)
 	}
 }
 
-static inline bool mmap_lock_speculation_start(struct mm_struct *mm, int *seq)
+static inline bool mmap_lock_speculation_start(struct mm_struct *mm, long *seq)
 {
 	/* Pairs with RELEASE semantics in inc_mm_lock_seq(). */
 	*seq = smp_load_acquire(&mm->mm_lock_seq);
@@ -113,7 +113,7 @@ static inline bool mmap_lock_speculation_start(struct mm_struct *mm, int *seq)
 	return (*seq & 1) == 0;
 }
 
-static inline bool mmap_lock_speculation_end(struct mm_struct *mm, int seq)
+static inline bool mmap_lock_speculation_end(struct mm_struct *mm, long seq)
 {
 	/* Pairs with ACQUIRE semantics in inc_mm_lock_seq(). */
 	smp_rmb();
@@ -123,8 +123,8 @@ static inline bool mmap_lock_speculation_end(struct mm_struct *mm, int seq)
 #else
 static inline void init_mm_lock_seq(struct mm_struct *mm) {}
 static inline void inc_mm_lock_seq(struct mm_struct *mm, bool acquire) {}
-static inline bool mmap_lock_speculation_start(struct mm_struct *mm, int *seq) { return false; }
-static inline bool mmap_lock_speculation_end(struct mm_struct *mm, int seq) { return false; }
+static inline bool mmap_lock_speculation_start(struct mm_struct *mm, long *seq) { return false; }
+static inline bool mmap_lock_speculation_end(struct mm_struct *mm, long seq) { return false; }
 #endif
 
 /*
-- 
2.43.5
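
P.S. For reviewers, a quick userspace-only illustration of the wraparound
math behind the int -> long switch. This is not part of the patch; the
two-increments-per-write-cycle assumption simply mirrors the even/odd
protocol implied by the (*seq & 1) check in mmap_lock_speculation_start():

/* toy demo, NOT kernel code: 32-bit vs 64-bit sequence counter wrap */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assume each mmap_lock write lock/unlock cycle bumps the
	 * counter twice (odd while locked, even after release). */
	uint64_t cycles = UINT64_C(1) << 31;

	uint32_t seq32 = (uint32_t)(2 * cycles); /* 2^32 mod 2^32 == 0 */
	uint64_t seq64 = 2 * cycles;             /* 2^32, nowhere near wrap */

	printf("after 2^31 write cycles: 32-bit seq = %" PRIu32 " (wrapped), "
	       "64-bit seq = %" PRIu64 "\n", seq32, seq64);
	return 0;
}

At, say, 10M write cycles per second a 32-bit counter revisits a value in
under four minutes, while a 64-bit one would take tens of thousands of
years, which is the whole point of the switch.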
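
In the same spirit, here is a standalone C11 sketch of the speculation
protocol itself, loosely mirroring mmap_lock_speculation_start()/end()
with userspace atomics. The names and the smp_rmb() -> acquire-fence
mapping are my approximation, not the kernel implementation:

#include <stdatomic.h>
#include <stdbool.h>

/* even: no writer; odd: write-locked (matches the (*seq & 1) check) */
static _Atomic long demo_lock_seq;

static bool speculation_start(long *seq)
{
	/* acquire pairs with the writer's release-increment */
	*seq = atomic_load_explicit(&demo_lock_seq, memory_order_acquire);
	return (*seq & 1) == 0;	/* only speculate while not write-locked */
}

static bool speculation_end(long seq)
{
	/* order the speculative reads before the re-check, like smp_rmb() */
	atomic_thread_fence(memory_order_acquire);
	/* unchanged seq => no writer ran; speculated data is usable */
	return seq == atomic_load_explicit(&demo_lock_seq,
					   memory_order_relaxed);
}

int main(void)
{
	long seq;

	if (speculation_start(&seq)) {
		/* ... speculative, lock-free reads would go here ... */
		if (speculation_end(seq))
			return 0;	/* speculation was valid */
	}
	return 1;	/* fall back to taking the lock */
}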