On Mon, Apr 11, 2011 at 5:27 AM, Namhyung Kim <namhyung@xxxxxxxxx> wrote:
> Split out some common code in shmem_truncate_range() in order to
> improve readability (hopefully) and to reduce code duplication.
>
> Signed-off-by: Namhyung Kim <namhyung@xxxxxxxxx>

Thank you for taking the trouble to do this.

However... all that shmem_swp index code is irredeemably unreadable
(my fault, dating from when I converted it to use highmem with kmaps),
and I'd rather leave it untouched until we simply delete it completely.

I have a patch/set (good for my testing but not yet good for final
submission) which removes all that code, and the need to allocate
shmem_swp index pages (even when CONFIG_SWAP is not set!): instead
saving the swp_entries in the standard pagecache radix_tree for the
file, so no extra allocations are needed at all.

It is possible that my patch/set will not be accepted (extending the
radix_tree in that way may meet some resistance); but I do think
that's the right way forward.

Thanks,
Hugh

> ---
>  mm/shmem.c |   62 ++++++++++++++++++++++++++++++++++-------------------
>  1 files changed, 36 insertions(+), 26 deletions(-)
>
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 58da7c150ba6..58ad1159678f 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -531,6 +531,31 @@ static void shmem_free_pages(struct list_head *next)
> 	} while (next);
>  }
>
> +/**
> + * remove_indirect_page - remove an indirect page from upper layer
> + * @dir:	pointer to the indirect page in the upper layer
> + * @page:	indirect page to be removed
> + * @needs_lock:	pointer to spinlock if needed
> + * @free_list:	list which the removed page will be inserted into
> + *
> + * This function removes @page from @dir and inserts it into @free_list.
> + * The @page still remains after this function but will not be seen by
> + * other tasks. Finally it will be freed via shmem_free_pages().
> + */
> +static void remove_indirect_page(struct page **dir, struct page *page,
> +		spinlock_t *needs_lock, struct list_head *free_list)
> +{
> +	if (needs_lock) {
> +		spin_lock(needs_lock);
> +		*dir = NULL;
> +		spin_unlock(needs_lock);
> +	} else {
> +		*dir = NULL;
> +	}
> +
> +	list_add(&page->lru, free_list);
> +}
> +
>  static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
>  {
>  	struct shmem_inode_info *info = SHMEM_I(inode);
> @@ -582,9 +607,9 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
>
> 	topdir = info->i_indirect;
> 	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
> -		info->i_indirect = NULL;
> +		remove_indirect_page(&info->i_indirect, topdir, NULL,
> +				     &pages_to_free);
> 		nr_pages_to_free++;
> -		list_add(&topdir->lru, &pages_to_free);
> 	}
> 	spin_unlock(&info->lock);
>
> @@ -637,15 +662,10 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
> 			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
> 				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
> 			if (!diroff && !offset && upper_limit >= stage) {
> -				if (needs_lock) {
> -					spin_lock(needs_lock);
> -					*dir = NULL;
> -					spin_unlock(needs_lock);
> -					needs_lock = NULL;
> -				} else
> -					*dir = NULL;
> +				remove_indirect_page(dir, middir, needs_lock,
> +						     &pages_to_free);
> 				nr_pages_to_free++;
> -				list_add(&middir->lru, &pages_to_free);
> +				needs_lock = NULL;
> 			}
> 			shmem_dir_unmap(dir);
> 			dir = shmem_dir_map(middir);
> @@ -672,15 +692,10 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
> 			if (punch_hole)
> 				needs_lock = &info->lock;
> 			if (upper_limit >= stage) {
> -				if (needs_lock) {
> -					spin_lock(needs_lock);
> -					*dir = NULL;
> -					spin_unlock(needs_lock);
> -					needs_lock = NULL;
> -				} else
> -					*dir = NULL;
> +				remove_indirect_page(dir, middir, needs_lock,
> +						     &pages_to_free);
> 				nr_pages_to_free++;
> -				list_add(&middir->lru, &pages_to_free);
> +				needs_lock = NULL;
> 			}
> 			shmem_dir_unmap(dir);
> 			cond_resched();
> @@ -690,15 +705,10 @@ static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
> 		punch_lock = needs_lock;
> 		subdir = dir[diroff];
> 		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
> -			if (needs_lock) {
> -				spin_lock(needs_lock);
> -				dir[diroff] = NULL;
> -				spin_unlock(needs_lock);
> -				punch_lock = NULL;
> -			} else
> -				dir[diroff] = NULL;
> +			remove_indirect_page(&dir[diroff], subdir, needs_lock,
> +					     &pages_to_free);
> 			nr_pages_to_free++;
> -			list_add(&subdir->lru, &pages_to_free);
> +			punch_lock = NULL;
> 		}
> 		if (subdir && page_private(subdir) /* has swap entries */) {
> 			size = limit - idx;
> --
> 1.7.4
>