To take advantage of optimizations when adding pages to the page cache
via shmem_insert_pages(), improve the likelihood that the pages array
passed to shmem_insert_pages() starts on an aligned index.  Do this when
preserving pages by starting a new pkram_link page when the index of the
current page is aligned and a full aligned block of entries will no
longer fit in the current pkram_link page.

Signed-off-by: Anthony Yznaga <anthony.yznaga@xxxxxxxxxx>
---
 mm/pkram.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/mm/pkram.c b/mm/pkram.c
index ef092aa5ce7a..416c3ca4411b 100644
--- a/mm/pkram.c
+++ b/mm/pkram.c
@@ -913,11 +913,21 @@ static int __pkram_save_page(struct pkram_stream *ps,
 {
 	struct pkram_link *link = ps->link;
 	struct pkram_obj *obj = ps->obj;
+	int order, align, align_cnt;
 	pkram_entry_t p;
-	int order;
+
+	if (PageTransHuge(page)) {
+		align = 1 << (HPAGE_PMD_ORDER + XA_CHUNK_SHIFT - (HPAGE_PMD_ORDER % XA_CHUNK_SHIFT));
+		align_cnt = align >> HPAGE_PMD_ORDER;
+	} else {
+		align = XA_CHUNK_SIZE;
+		align_cnt = XA_CHUNK_SIZE;
+	}
 
 	if (!link || ps->entry_idx >= PKRAM_LINK_ENTRIES_MAX ||
-	    index != ps->next_index) {
+	    index != ps->next_index ||
+	    (IS_ALIGNED(index, align) &&
+	     (ps->entry_idx + align_cnt > PKRAM_LINK_ENTRIES_MAX))) {
		struct page *link_page;
 
 		link_page = pkram_alloc_page((ps->gfp_mask & GFP_RECLAIM_MASK) |
-- 
2.13.3

_______________________________________________
kexec mailing list
kexec@xxxxxxxxxxxxxxxxxxx
http://lists.infradead.org/mailman/listinfo/kexec
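For illustration, here is a minimal userspace sketch of the cut-point
heuristic above.  The constants mirror a typical x86-64 configuration
(XA_CHUNK_SHIFT = 6, HPAGE_PMD_ORDER = 9); PKRAM_LINK_ENTRIES_MAX is an
assumed stand-in chosen so the heuristic fires (the real value follows
from the layout of struct pkram_link), and start_new_link() is a
hypothetical helper modeling only the size/alignment parts of the
condition in __pkram_save_page(), omitting the !link and
index != ps->next_index checks:

/*
 * Sketch of the pkram_link cut-point heuristic.  Constant values are
 * assumptions mirroring a typical x86-64 build; PKRAM_LINK_ENTRIES_MAX
 * is made up for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define XA_CHUNK_SHIFT		6
#define XA_CHUNK_SIZE		(1UL << XA_CHUNK_SHIFT)
#define HPAGE_PMD_ORDER		9
#define PKRAM_LINK_ENTRIES_MAX	510	/* assumed value, not the real one */

/* Mirrors the kernel macro for power-of-two alignments. */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

/*
 * Hypothetical helper: decide whether to start a new pkram_link page
 * before storing the entry for @index, where @entry_idx is the next
 * free slot in the current link page.
 */
static bool start_new_link(unsigned long index, int entry_idx, bool thp)
{
	unsigned long align;
	int align_cnt;

	if (thp) {
		/*
		 * Round HPAGE_PMD_ORDER up to a multiple of XA_CHUNK_SHIFT
		 * so a run of THP entries lines up with whole xarray chunks.
		 */
		align = 1UL << (HPAGE_PMD_ORDER + XA_CHUNK_SHIFT -
				(HPAGE_PMD_ORDER % XA_CHUNK_SHIFT));
		align_cnt = align >> HPAGE_PMD_ORDER;
	} else {
		align = XA_CHUNK_SIZE;
		align_cnt = XA_CHUNK_SIZE;
	}

	/*
	 * Cut here if the link page is full, or if @index starts an
	 * aligned block and a full block of align_cnt entries can no
	 * longer fit in this link page.
	 */
	return entry_idx >= PKRAM_LINK_ENTRIES_MAX ||
	       (IS_ALIGNED(index, align) &&
		entry_idx + align_cnt > PKRAM_LINK_ENTRIES_MAX);
}

int main(void)
{
	int entry_idx = 0;

	/* Save 1024 consecutive small pages and print each cut point. */
	for (unsigned long index = 0; index < 1024; index++) {
		if (start_new_link(index, entry_idx, false)) {
			printf("new pkram_link page before index %lu\n", index);
			entry_idx = 0;
		}
		entry_idx++;
	}
	return 0;
}

With these assumed values the cuts land before indexes 448 and 896, so in
this run each new pkram_link page begins on an XA_CHUNK_SIZE-aligned
index, which is what lets the pages array later passed to
shmem_insert_pages() start on an aligned index.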