On Fri, Oct 02, 2015 at 03:35:49PM +0200, Vlastimil Babka wrote: > +static unsigned long smaps_shmem_swap(struct vm_area_struct *vma) > +{ > + struct inode *inode; > + unsigned long swapped; > + pgoff_t start, end; > + > + if (!vma->vm_file) > + return 0; > + > + inode = file_inode(vma->vm_file); > + > + if (!shmem_mapping(inode->i_mapping)) > + return 0; > + > + /* > + * The easier cases are when the shmem object has nothing in swap, or > + * we have the whole object mapped. Then we can simply use the stats > + * that are already tracked by shmem. > + */ > + swapped = shmem_swap_usage(inode); > + > + if (swapped == 0) > + return 0; > + > + if (vma->vm_end - vma->vm_start >= inode->i_size) > + return swapped; > + > + /* > + * Here we have to inspect individual pages in our mapped range to > + * determine how much of them are swapped out. Thanks to RCU, we don't > + * need i_mutex to protect against truncating or hole punching. > + */ At the very least put in an assertion that we hold the RCU read lock, otherwise RCU doesn't guarantee anything and it's not obvious it is held here. > + start = linear_page_index(vma, vma->vm_start); > + end = linear_page_index(vma, vma->vm_end); > + > + return shmem_partial_swap_usage(inode->i_mapping, start, end); > +} > + * Determine (in bytes) how much of the whole shmem object is swapped out. > + */ > +unsigned long shmem_swap_usage(struct inode *inode) > +{ > + struct shmem_inode_info *info = SHMEM_I(inode); > + unsigned long swapped; > + > + /* Mostly an overkill, but it's not atomic64_t */ Yeah, that doesn't make any kind of sense. > + spin_lock(&info->lock); > + swapped = info->swapped; > + spin_unlock(&info->lock); > + > + return swapped << PAGE_SHIFT; > +} -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@xxxxxxxxx"> email@xxxxxxxxx </a>