The patch titled
     Subject: lib/lzo: implement run-length encoding
has been added to the -mm tree.  Its filename is
     lib-lzo-implement-run-length-encoding.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/lib-lzo-implement-run-length-encoding.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/lib-lzo-implement-run-length-encoding.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Dave Rodgman <dave.rodgman@xxxxxxx>
Subject: lib/lzo: implement run-length encoding

When using zram, we frequently encounter long runs of zero bytes.  This
adds a special case which identifies runs of zeros and encodes them using
run-length encoding.  This is faster for both compression and
decompression.  For high-entropy data which doesn't hit this case, the
impact is minimal.  The compression ratio is within a few percent in all
cases.

This modifies the bitstream in a way which is backwards compatible (i.e.,
we can decompress old bitstreams, but old versions of lzo cannot
decompress new bitstreams).

Link: http://lkml.kernel.org/r/20181127161913.23863-7-dave.rodgman@xxxxxxx
Signed-off-by: Dave Rodgman <dave.rodgman@xxxxxxx>
Cc: David S. Miller <davem@xxxxxxxxxxxxx>
Cc: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
Cc: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
Cc: Markus F.X.J. Oberhumer <markus@xxxxxxxxxxxxx>
Cc: Matt Sealey <matt.sealey@xxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: Nitin Gupta <nitingupta910@xxxxxxxxx>
Cc: Richard Purdie <rpurdie@xxxxxxxxxxxxxx>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work@xxxxxxxxx>
Cc: Sonny Rao <sonnyrao@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

--- a/Documentation/lzo.txt~lib-lzo-implement-run-length-encoding
+++ a/Documentation/lzo.txt
@@ -78,16 +78,30 @@ Description
 is an implementation design choice independent on the algorithm or
 encoding.
 
+Versions
+
+0: Original version
+1: LZO-RLE
+
+Version 1 of LZO implements an extension to encode runs of zeros using run
+length encoding. This improves speed for data with many zeros, which is a
+common case for zram. This modifies the bitstream in a backwards compatible way
+(v1 can correctly decompress v0 compressed data, but v0 cannot read v1 data).
+
 Byte sequences
 ==============
 
   First byte encoding::
 
-      0..17   : follow regular instruction encoding, see below. It is worth
-                noting that codes 16 and 17 will represent a block copy from
-                the dictionary which is empty, and that they will always be
+      0..16   : follow regular instruction encoding, see below. It is worth
+                noting that code 16 will represent a block copy from the
+                dictionary which is empty, and that it will always be
                 invalid at this place.
 
+      17      : bitstream version. If the first byte is 17, the next byte
+                gives the bitstream version. If the first byte is not 17,
+                the bitstream version is 0.
+
      18..21   : copy 0..3 literals
                 state = (byte - 17) = 0..3  [ copy <state> literals ]
                 skip byte
@@ -140,6 +154,11 @@ Byte sequences
            state = S (copy S literals after this block)
            End of stream is reached if distance == 16384
+        In version 1, this instruction is also used to encode a run of zeros if
+        distance = 0xbfff, i.e. H = 1 and the D bits are all 1.
+        In this case, it is followed by a fourth byte, X.
+        run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4.
+
 
       0 0 1 L L L L L  (32..63)
            Copy of small block within 16kB distance (preferably less than 34B)
            length = 2 + (L ?: 31 + (zero_bytes * 255) + non_zero_byte)
@@ -165,7 +184,9 @@ Authors
 =======
 
   This document was written by Willy Tarreau <w@xxxxxx> on 2014/07/19 during an
-  analysis of the decompression code available in Linux 3.16-rc5. The code is
-  tricky, it is possible that this document contains mistakes or that a few
-  corner cases were overlooked. In any case, please report any doubt, fix, or
-  proposed updates to the author(s) so that the document can be updated.
+  analysis of the decompression code available in Linux 3.16-rc5, and updated
+  by Dave Rodgman <dave.rodgman@xxxxxxx> on 2018/10/30 to introduce run-length
+  encoding. The code is tricky, it is possible that this document contains
+  mistakes or that a few corner cases were overlooked. In any case, please
+  report any doubt, fix, or proposed updates to the author(s) so that the
+  document can be updated.
--- a/include/linux/lzo.h~lib-lzo-implement-run-length-encoding
+++ a/include/linux/lzo.h
@@ -18,7 +18,7 @@
 #define LZO1X_1_MEM_COMPRESS	(8192 * sizeof(unsigned short))
 #define LZO1X_MEM_COMPRESS	LZO1X_1_MEM_COMPRESS
 
-#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
+#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3 + 2)
 
 /* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */
 int lzo1x_1_compress(const unsigned char *src, size_t src_len,
--- a/lib/lzo/lzo1x_compress.c~lib-lzo-implement-run-length-encoding
+++ a/lib/lzo/lzo1x_compress.c
@@ -20,7 +20,7 @@
 static noinline size_t
 lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
 		    unsigned char *out, size_t *out_len,
-		    size_t ti, void *wrkmem)
+		    size_t ti, void *wrkmem, signed char *state_offset)
 {
 	const unsigned char *ip;
 	unsigned char *op;
@@ -38,24 +38,82 @@ lzo1x_1_do_compress(const unsigned char
 		const unsigned char *m_pos;
 		size_t t, m_len, m_off;
 		u32 dv;
+		u32 run_length = 0;
 literal:
 		ip += 1 + ((ip - ii) >> 5);
 next:
 		if (unlikely(ip >= ip_end))
 			break;
 		dv = get_unaligned_le32(ip);
-		t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK;
-		m_pos = in + dict[t];
-		dict[t] = (lzo_dict_t) (ip - in);
-		if (unlikely(dv != get_unaligned_le32(m_pos)))
-			goto literal;
+
+		if (dv == 0) {
+			const unsigned char *ir = ip + 4;
+			const unsigned char *limit = ip_end
+				< (ip + MAX_ZERO_RUN_LENGTH + 1)
+				? ip_end : ip + MAX_ZERO_RUN_LENGTH + 1;
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
+	defined(LZO_FAST_64BIT_MEMORY_ACCESS)
+			u64 dv64;
+
+			for (; (ir + 32) <= limit; ir += 32) {
+				dv64 = get_unaligned((u64 *)ir);
+				dv64 |= get_unaligned((u64 *)ir + 1);
+				dv64 |= get_unaligned((u64 *)ir + 2);
+				dv64 |= get_unaligned((u64 *)ir + 3);
+				if (dv64)
+					break;
+			}
+			for (; (ir + 8) <= limit; ir += 8) {
+				dv64 = get_unaligned((u64 *)ir);
+				if (dv64) {
+# if defined(__LITTLE_ENDIAN)
+					ir += __builtin_ctzll(dv64) >> 3;
+# elif defined(__BIG_ENDIAN)
+					ir += __builtin_clzll(dv64) >> 3;
+# else
+# error "missing endian definition"
+# endif
+					break;
+				}
+			}
+#else
+			while ((ir < (const unsigned char *)
+					ALIGN((uintptr_t)ir, 4)) &&
+					(ir < limit) && (*ir == 0))
+				ir++;
+			for (; (ir + 4) <= limit; ir += 4) {
+				dv = *((u32 *)ir);
+				if (dv) {
+# if defined(__LITTLE_ENDIAN)
+					ir += __builtin_ctz(dv) >> 3;
+# elif defined(__BIG_ENDIAN)
+					ir += __builtin_clz(dv) >> 3;
+# else
+# error "missing endian definition"
+# endif
+					break;
+				}
+			}
+#endif
+			while (likely(ir < limit) && unlikely(*ir == 0))
+				ir++;
+			run_length = ir - ip;
+			if (run_length > MAX_ZERO_RUN_LENGTH)
+				run_length = MAX_ZERO_RUN_LENGTH;
+		} else {
+			t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK;
+			m_pos = in + dict[t];
+			dict[t] = (lzo_dict_t) (ip - in);
+			if (unlikely(dv != get_unaligned_le32(m_pos)))
+				goto literal;
+		}
 
 		ii -= ti;
 		ti = 0;
 		t = ip - ii;
 		if (t != 0) {
 			if (t <= 3) {
-				op[-2] |= t;
+				op[*state_offset] |= t;
 				COPY4(op, ii);
 				op += t;
 			} else if (t <= 16) {
@@ -86,6 +144,17 @@ next:
 			}
 		}
 
+		if (unlikely(run_length)) {
+			ip += run_length;
+			run_length -= MIN_ZERO_RUN_LENGTH;
+			put_unaligned_le32((run_length << 21) | 0xfffc18
+					   | (run_length & 0x7), op);
+			op += 4;
+			run_length = 0;
+			*state_offset = -3;
+			goto finished_writing_instruction;
+		}
+
 		m_len = 4;
 		{
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64)
@@ -168,7 +237,6 @@ m_len_done:
 
 		m_off = ip - m_pos;
 		ip += m_len;
-		ii = ip;
 		if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) {
 			m_off -= 1;
 			*op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2));
@@ -205,6 +273,9 @@ m_len_done:
 			*op++ = (m_off << 2);
 			*op++ = (m_off >> 6);
 		}
+		*state_offset = -2;
+finished_writing_instruction:
+		ii = ip;
 		goto next;
 	}
 	*out_len = op - out;
@@ -219,6 +290,12 @@ int lzo1x_1_compress(const unsigned char
 	unsigned char *op = out;
 	size_t l = in_len;
 	size_t t = 0;
+	signed char state_offset = -2;
+
+	// LZO v0 will never write 17 as first byte,
+	// so this is used to version the bitstream
+	*op++ = 17;
+	*op++ = LZO_VERSION;
 
 	while (l > 20) {
 		size_t ll = l <= (M4_MAX_OFFSET + 1) ? l : (M4_MAX_OFFSET + 1);
@@ -227,7 +304,8 @@ int lzo1x_1_compress(const unsigned char
 			break;
 		BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS);
 		memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t));
-		t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem);
+		t = lzo1x_1_do_compress(ip, ll, op, out_len,
+					t, wrkmem, &state_offset);
 		ip += ll;
 		op += *out_len;
 		l  -= ll;
@@ -240,7 +318,7 @@ int lzo1x_1_compress(const unsigned char
 		if (op == out && t <= 238) {
 			*op++ = (17 + t);
 		} else if (t <= 3) {
-			op[-2] |= t;
+			op[state_offset] |= t;
 		} else if (t <= 18) {
 			*op++ = (t - 3);
 		} else {
--- a/lib/lzo/lzo1x_decompress_safe.c~lib-lzo-implement-run-length-encoding
+++ a/lib/lzo/lzo1x_decompress_safe.c
@@ -46,11 +46,23 @@ int lzo1x_decompress_safe(const unsigned
 	const unsigned char * const ip_end = in + in_len;
 	unsigned char * const op_end = out + *out_len;
 
+	unsigned char bitstream_version;
+
 	op = out;
 	ip = in;
 
 	if (unlikely(in_len < 3))
 		goto input_overrun;
+
+	if (likely(*ip == 17)) {
+		bitstream_version = ip[1];
+		ip += 2;
+		if (unlikely(in_len < 5))
+			goto input_overrun;
+	} else {
+		bitstream_version = 0;
+	}
+
 	if (*ip > 17) {
 		t = *ip++ - 17;
 		if (t < 4) {
@@ -151,32 +163,49 @@ copy_literal_run:
 			m_pos -= next >> 2;
 			next &= 3;
 		} else {
-			m_pos = op;
-			m_pos -= (t & 8) << 11;
-			t = (t & 7) + (3 - 1);
-			if (unlikely(t == 2)) {
-				size_t offset;
-				const unsigned char *ip_last = ip;
+			NEED_IP(2);
+			next = get_unaligned_le16(ip);
+			if (((next & 0xfffc) == 0xfffc) &&
+			    ((t & 0xf8) == 0x18) &&
+			    likely(bitstream_version)) {
+				NEED_IP(3);
+				t &= 7;
+				t |= ip[2] << 3;
+				t += MIN_ZERO_RUN_LENGTH;
+				NEED_OP(t);
+				memset(op, 0, t);
+				op += t;
+				next &= 3;
+				ip += 3;
+				goto match_next;
+			} else {
+				m_pos = op;
+				m_pos -= (t & 8) << 11;
+				t = (t & 7) + (3 - 1);
+				if (unlikely(t == 2)) {
+					size_t offset;
+					const unsigned char *ip_last = ip;
 
-				while (unlikely(*ip == 0)) {
-					ip++;
-					NEED_IP(1);
-				}
-				offset = ip - ip_last;
-				if (unlikely(offset > MAX_255_COUNT))
-					return LZO_E_ERROR;
+					while (unlikely(*ip == 0)) {
+						ip++;
+						NEED_IP(1);
+					}
+					offset = ip - ip_last;
+					if (unlikely(offset > MAX_255_COUNT))
+						return LZO_E_ERROR;
 
-				offset = (offset << 8) - offset;
-				t += offset + 7 + *ip++;
-				NEED_IP(2);
+					offset = (offset << 8) - offset;
+					t += offset + 7 + *ip++;
+					NEED_IP(2);
+					next = get_unaligned_le16(ip);
+				}
+				ip += 2;
+				m_pos -= next >> 2;
+				next &= 3;
+				if (m_pos == op)
+					goto eof_found;
+				m_pos -= 0x4000;
 			}
-			next = get_unaligned_le16(ip);
-			ip += 2;
-			m_pos -= next >> 2;
-			next &= 3;
-			if (m_pos == op)
-				goto eof_found;
-			m_pos -= 0x4000;
 		}
 		TEST_LB(m_pos);
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
--- a/lib/lzo/lzodefs.h~lib-lzo-implement-run-length-encoding
+++ a/lib/lzo/lzodefs.h
@@ -13,6 +13,12 @@
  */
 
 
+/* Version
+ * 0: original lzo version
+ * 1: lzo with support for RLE
+ */
+#define LZO_VERSION 1
+
 #define COPY4(dst, src) \
 	put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst))
 #if defined(CONFIG_X86_64) || defined(CONFIG_ARM64)
@@ -31,6 +37,7 @@
 #elif defined(CONFIG_X86_64) || defined(CONFIG_ARM64)
 #define LZO_USE_CTZ64	1
 #define LZO_USE_CTZ32	1
+#define LZO_FAST_64BIT_MEMORY_ACCESS
 #elif defined(CONFIG_X86) || defined(CONFIG_PPC)
 #define LZO_USE_CTZ32	1
 #elif defined(CONFIG_ARM)
@@ -45,7 +52,7 @@
 #define M1_MAX_OFFSET	0x0400
 #define M2_MAX_OFFSET	0x0800
 #define M3_MAX_OFFSET	0x4000
-#define M4_MAX_OFFSET	0xbfff
+#define M4_MAX_OFFSET	0xbffe
 
 #define M1_MIN_LEN	2
 #define M1_MAX_LEN	2
@@ -61,6 +68,9 @@
 #define M3_MARKER	32
 #define M4_MARKER	16
 
+#define MIN_ZERO_RUN_LENGTH	4
+#define MAX_ZERO_RUN_LENGTH	(2047 + MIN_ZERO_RUN_LENGTH)
+
 #define lzo_dict_t	unsigned short
 #define D_BITS		13
 #define D_SIZE		(1u << D_BITS)
_

Patches currently in -mm which might be from dave.rodgman@xxxxxxx are

lib-lzo-tidy-up-ifdefs.patch
lib-lzo-implement-run-length-encoding.patch
lib-lzo-separate-lzo-rle-from-lzo.patch
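
For reference, the zero-run instruction described in the Documentation/lzo.txt
hunk above can be exercised stand-alone.  The sketch below is a minimal
user-space illustration of that byte layout (first byte 0 0 0 1 1 L L L,
D bits all ones, fourth byte X, run length = ((X << 3) | L L L) + 4); the
helper names and the round-trip test are illustrative only and are not part
of the patch:

/*
 * Illustrative only -- not from the patch.  Round-trips the LZO-RLE
 * zero-run instruction: [0x18 | LLL] 0xfc 0xff [X],
 * where run length = ((X << 3) | L L L) + 4.
 */
#include <stdint.h>
#include <stdio.h>

#define MIN_ZERO_RUN_LENGTH	4
#define MAX_ZERO_RUN_LENGTH	(2047 + MIN_ZERO_RUN_LENGTH)

/* Pack a run of 'run' zero bytes (4..2051) into the 4-byte instruction. */
static void encode_zero_run(size_t run, uint8_t out[4])
{
	size_t r = run - MIN_ZERO_RUN_LENGTH;	/* biased length, 0..2047 */

	out[0] = 0x18 | (r & 7);	/* M4 marker, H = 1, low 3 length bits */
	out[1] = 0xfc;			/* D bits all ones ... */
	out[2] = 0xff;			/* ... same bytes the compressor emits */
	out[3] = r >> 3;		/* X: high 8 length bits */
}

/* Recover the run length, or return 0 if this is not a zero-run marker. */
static size_t decode_zero_run(const uint8_t in[4])
{
	uint16_t next = in[1] | (in[2] << 8);	/* the decompressor's 0xfffc test */

	if ((in[0] & 0xf8) != 0x18 || (next & 0xfffc) != 0xfffc)
		return 0;
	return (size_t)((in[3] << 3) | (in[0] & 7)) + MIN_ZERO_RUN_LENGTH;
}

int main(void)
{
	uint8_t insn[4];
	size_t run;

	for (run = MIN_ZERO_RUN_LENGTH; run <= MAX_ZERO_RUN_LENGTH; run++) {
		encode_zero_run(run, insn);
		if (decode_zero_run(insn) != run) {
			printf("mismatch at run length %zu\n", run);
			return 1;
		}
	}
	printf("all %d run lengths round-trip\n",
	       MAX_ZERO_RUN_LENGTH - MIN_ZERO_RUN_LENGTH + 1);
	return 0;
}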