Seems reasonable to me (also Shaggy acked this). Any idea how likely it
is to affect performance (and would thus need to be run through some
simple iozone and cthon (timing mode) or similar perf testing)?

On Wed, Mar 17, 2010 at 1:23 AM, Nick Piggin <npiggin@xxxxxxx> wrote:
> cifs: use add_to_page_cache_lru
>
> add_to_page_cache_lru is exported, so it should be used. Benefits over
> using a private pagevec: neater code, 128 bytes less stack used, percpu
> lru ordering is preserved, and finally we don't need to flush the
> pagevec before returning, so batching may be shared with other LRU
> insertions.
>
> Signed-off-by: Nick Piggin <npiggin@xxxxxxx>
> ---
>  fs/cifs/file.c |   13 +++----------
>  1 file changed, 3 insertions(+), 10 deletions(-)
>
> Index: linux-2.6/fs/cifs/file.c
> ===================================================================
> --- linux-2.6.orig/fs/cifs/file.c
> +++ linux-2.6/fs/cifs/file.c
> @@ -1907,8 +1907,7 @@ int cifs_file_mmap(struct file *file, st
>
>
>  static void cifs_copy_cache_pages(struct address_space *mapping,
> -	struct list_head *pages, int bytes_read, char *data,
> -	struct pagevec *plru_pvec)
> +	struct list_head *pages, int bytes_read, char *data)
>  {
>  	struct page *page;
>  	char *target;
> @@ -1920,7 +1919,7 @@ static void cifs_copy_cache_pages(struct
>  		page = list_entry(pages->prev, struct page, lru);
>  		list_del(&page->lru);
>
> -		if (add_to_page_cache(page, mapping, page->index,
> +		if (add_to_page_cache_lru(page, mapping, page->index,
>  					GFP_KERNEL)) {
>  			page_cache_release(page);
>  			cFYI(1, ("Add page cache failed"));
> @@ -1946,8 +1945,6 @@ static void cifs_copy_cache_pages(struct
>  		flush_dcache_page(page);
>  		SetPageUptodate(page);
>  		unlock_page(page);
> -		if (!pagevec_add(plru_pvec, page))
> -			__pagevec_lru_add_file(plru_pvec);
>  		data += PAGE_CACHE_SIZE;
>  	}
>  	return;
> @@ -1966,7 +1963,6 @@ static int cifs_readpages(struct file *f
>  	unsigned int read_size, i;
>  	char *smb_read_data = NULL;
>  	struct smb_com_read_rsp *pSMBr;
> -	struct pagevec lru_pvec;
>  	struct cifsFileInfo *open_file;
>  	int buf_type = CIFS_NO_BUFFER;
>
> @@ -1980,7 +1976,6 @@ static int cifs_readpages(struct file *f
>  	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
>  	pTcon = cifs_sb->tcon;
>
> -	pagevec_init(&lru_pvec, 0);
>  	cFYI(DBG2, ("rpages: num pages %d", num_pages));
>  	for (i = 0; i < num_pages; ) {
>  		unsigned contig_pages;
> @@ -2049,7 +2044,7 @@ static int cifs_readpages(struct file *f
>  			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
>  			cifs_copy_cache_pages(mapping, page_list, bytes_read,
>  				smb_read_data + 4 /* RFC1001 hdr */ +
> -				le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
> +				le16_to_cpu(pSMBr->DataOffset));
>
>  			i += bytes_read >> PAGE_CACHE_SHIFT;
>  			cifs_stats_bytes_read(pTcon, bytes_read);
> @@ -2082,8 +2077,6 @@ static int cifs_readpages(struct file *f
>  		bytes_read = 0;
>  	}
>
> -	pagevec_lru_add_file(&lru_pvec);
> -
>  	/* need to free smb_read_data buf before exit */
>  	if (smb_read_data) {
>  		if (buf_type == CIFS_SMALL_BUFFER)

--
Thanks,

Steve
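
P.S. For readers less familiar with the pagevec API, here is a minimal
sketch of the two patterns the patch chooses between. This is
illustrative only, not code from the patch: the page-fill/unlock step is
elided and the function names (fill_pages_private_pvec, fill_pages_lru)
are made up.

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>

/* Old pattern: the caller keeps a private pagevec on its stack and
 * batches the LRU insertions itself. */
static void fill_pages_private_pvec(struct address_space *mapping,
				    struct list_head *pages)
{
	struct pagevec lru_pvec;	/* the ~128 bytes of stack saved */
	struct page *page;

	pagevec_init(&lru_pvec, 0);
	while (!list_empty(pages)) {
		page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);
		if (add_to_page_cache(page, mapping, page->index,
				      GFP_KERNEL)) {
			page_cache_release(page);
			continue;
		}
		/* ... copy data into the page, unlock it ... */
		if (!pagevec_add(&lru_pvec, page))	/* pvec full */
			__pagevec_lru_add_file(&lru_pvec);
	}
	pagevec_lru_add_file(&lru_pvec);	/* must flush before return */
}

/* New pattern: add_to_page_cache_lru() also queues the page on the
 * per-cpu LRU pagevec, so batching is shared with every other caller
 * and there is nothing left to flush here. */
static void fill_pages_lru(struct address_space *mapping,
			   struct list_head *pages)
{
	struct page *page;

	while (!list_empty(pages)) {
		page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping, page->index,
					  GFP_KERNEL)) {
			page_cache_release(page);
			continue;
		}
		/* ... copy data into the page, unlock it ... */
	}
}

The second form is what the patch converts cifs_readpages() to: no local
pagevec to declare, maintain, or flush, and the per-cpu batching is
shared with every other add_to_page_cache_lru() user.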