On Wed, Jan 11, 2017 at 3:52 PM, Vitaly Wool <vitalywool@xxxxxxxxx> wrote: > On Wed, 11 Jan 2017 17:43:13 +0100 > Vitaly Wool <vitalywool@xxxxxxxxx> wrote: > >> On Wed, Jan 11, 2017 at 5:28 PM, Dan Streetman <ddstreet@xxxxxxxx> wrote: >> > On Wed, Jan 11, 2017 at 10:06 AM, Vitaly Wool <vitalywool@xxxxxxxxx> wrote: >> >> z3fold_compact_page() currently only handles the situation when >> >> there's a single middle chunk within the z3fold page. However it >> >> may be worth it to move middle chunk closer to either first or >> >> last chunk, whichever is there, if the gap between them is big >> >> enough. >> >> >> >> This patch adds the relevant code, using BIG_CHUNK_GAP define as >> >> a threshold for middle chunk to be worth moving. >> >> >> >> Signed-off-by: Vitaly Wool <vitalywool@xxxxxxxxx> >> >> --- >> >> mm/z3fold.c | 26 +++++++++++++++++++++++++- >> >> 1 file changed, 25 insertions(+), 1 deletion(-) >> >> >> >> diff --git a/mm/z3fold.c b/mm/z3fold.c >> >> index 98ab01f..fca3310 100644 >> >> --- a/mm/z3fold.c >> >> +++ b/mm/z3fold.c >> >> @@ -268,6 +268,7 @@ static inline void *mchunk_memmove(struct z3fold_header *zhdr, >> >> zhdr->middle_chunks << CHUNK_SHIFT); >> >> } >> >> >> >> +#define BIG_CHUNK_GAP 3 >> >> /* Has to be called with lock held */ >> >> static int z3fold_compact_page(struct z3fold_header *zhdr) >> >> { >> >> @@ -286,8 +287,31 @@ static int z3fold_compact_page(struct z3fold_header *zhdr) >> >> zhdr->middle_chunks = 0; >> >> zhdr->start_middle = 0; >> >> zhdr->first_num++; >> >> + return 1; >> >> } >> >> - return 1; >> >> + >> >> + /* >> >> + * moving data is expensive, so let's only do that if >> >> + * there's substantial gain (at least BIG_CHUNK_GAP chunks) >> >> + */ >> >> + if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 && >> >> + zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >= >> >> + BIG_CHUNK_GAP) { >> >> + mchunk_memmove(zhdr, zhdr->first_chunks + 1); >> >> + zhdr->start_middle = zhdr->first_chunks + 1; >> > >> > this 
should be first_chunks + ZHDR_CHUNKS, not + 1. >> > >> >> + return 1; >> >> + } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 && >> >> + TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle >> >> + + zhdr->middle_chunks) >= >> >> + BIG_CHUNK_GAP) { >> >> + unsigned short new_start = NCHUNKS - zhdr->last_chunks - >> > >> > this should be TOTAL_CHUNKS, not NCHUNKS. >> >> Right :/ > > So here we go: > > > z3fold_compact_page() currently only handles the situation when > there's a single middle chunk within the z3fold page. However it > may be worth it to move middle chunk closer to either first or > last chunk, whichever is there, if the gap between them is big > enough. > > This patch adds the relevant code, using BIG_CHUNK_GAP define as > a threshold for middle chunk to be worth moving. > > Signed-off-by: Vitaly Wool <vitalywool@xxxxxxxxx> Acked-by: Dan Streetman <ddstreet@xxxxxxxx> > --- > mm/z3fold.c | 26 +++++++++++++++++++++++++- > 1 file changed, 25 insertions(+), 1 deletion(-) > > diff --git a/mm/z3fold.c b/mm/z3fold.c > index 98ab01f..fca3310 100644 > --- a/mm/z3fold.c > +++ b/mm/z3fold.c > @@ -268,6 +268,7 @@ static inline void *mchunk_memmove(struct z3fold_header *zhdr, > zhdr->middle_chunks << CHUNK_SHIFT); > } > > +#define BIG_CHUNK_GAP 3 > /* Has to be called with lock held */ > static int z3fold_compact_page(struct z3fold_header *zhdr) > { > @@ -286,8 +287,31 @@ static int z3fold_compact_page(struct z3fold_header *zhdr) > zhdr->middle_chunks = 0; > zhdr->start_middle = 0; > zhdr->first_num++; > + return 1; > } > - return 1; > + > + /* > + * moving data is expensive, so let's only do that if > + * there's substantial gain (at least BIG_CHUNK_GAP chunks) > + */ > + if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 && > + zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >= > + BIG_CHUNK_GAP) { > + mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS); > + zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; > + return 1; > + } 
else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 && > + TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle > + + zhdr->middle_chunks) >= > + BIG_CHUNK_GAP) { > + unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks - > + zhdr->middle_chunks; > + mchunk_memmove(zhdr, new_start); > + zhdr->start_middle = new_start; > + return 1; > + } > + > + return 0; > } > > /** > -- > 2.4.2 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href="mailto:dont@xxxxxxxxx"> email@xxxxxxxxx </a>