Signed-off-by: Chuck Lever <chuck.lever@xxxxxxxxxx>
---
 net/sunrpc/auth_gss/gss_krb5_crypto.c | 13 ++++----
 net/sunrpc/auth_gss/gss_krb5_wrap.c   | 11 ++++---
 net/sunrpc/auth_gss/svcauth_gss.c     | 51 +++++++++++++++++++--------------
 net/sunrpc/xdr.c                      |  3 ++
 4 files changed, 45 insertions(+), 33 deletions(-)

diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 6f2d30d7b766..eb5a43b61b42 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -412,8 +412,9 @@
 	err = crypto_ahash_init(req);
 	if (err)
 		goto out;
-	err = xdr_process_buf(body, body_offset, body->len - body_offset,
-			      checksummer, req);
+	err = xdr_process_buf(body, body_offset,
+			      xdr_buf_msglen(body) - body_offset, checksummer,
+			      req);
 	if (err)
 		goto out;
 	if (header != NULL) {
@@ -682,12 +683,10 @@ struct decryptor_desc {
 	SYNC_SKCIPHER_REQUEST_ON_STACK(req, cipher);
 	u8 *data;
 	struct page **save_pages;
-	u32 len = buf->len - offset;
+	u32 len = xdr_buf_msglen(buf) - offset;
 
-	if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
-		WARN_ON(0);
+	if (len > GSS_KRB5_MAX_BLOCKSIZE * 2)
 		return -ENOMEM;
-	}
 	data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_NOFS);
 	if (!data)
 		return -ENOMEM;
@@ -800,7 +799,7 @@ struct decryptor_desc {
 	if (err)
 		return GSS_S_FAILURE;
 
-	nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
+	nbytes = xdr_buf_msglen(buf) - offset - GSS_KRB5_TOK_HDR_LEN;
 	nblocks = (nbytes + blocksize - 1) / blocksize;
 	cbcbytes = 0;
 	if (nblocks > 2)
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 14a0aff0cd84..8d71d561f430 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -405,12 +405,13 @@ static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
 	BUG_ON(shift > LOCAL_BUF_LEN);
 
 	read_bytes_from_xdr_buf(buf, 0, head, shift);
-	for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
-		this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
+	for (i = 0; i + shift < xdr_buf_msglen(buf); i += LOCAL_BUF_LEN) {
+		this_len = min_t(unsigned int, LOCAL_BUF_LEN,
+				 xdr_buf_msglen(buf) - (i + shift));
 		read_bytes_from_xdr_buf(buf, i+shift, tmp, this_len);
 		write_bytes_to_xdr_buf(buf, i, tmp, this_len);
 	}
-	write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
+	write_bytes_to_xdr_buf(buf, xdr_buf_msglen(buf) - shift, head, shift);
 }
 
 static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
@@ -418,7 +419,7 @@ static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
 	int shifted = 0;
 	int this_shift;
 
-	shift %= buf->len;
+	shift %= xdr_buf_msglen(buf);
 	while (shifted < shift) {
 		this_shift = min(shift - shifted, LOCAL_BUF_LEN);
 		rotate_buf_a_little(buf, this_shift);
@@ -430,7 +431,7 @@ static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
 {
 	struct xdr_buf subbuf;
 
-	xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
+	xdr_buf_subsegment(buf, &subbuf, base, xdr_buf_msglen(buf) - base);
 
 	_rotate_left(&subbuf, shift);
 }
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index c62d1f10978b..893b9114cb8a 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -907,12 +907,6 @@ u32 svcauth_gss_flavor(struct auth_domain *dom)
 	return stat;
 }
 
-static inline int
-total_buf_len(struct xdr_buf *buf)
-{
-	return buf->head[0].iov_len + buf->page_len + buf->tail[0].iov_len;
-}
-
 static void
 fix_priv_head(struct xdr_buf *buf, int pad)
 {
@@ -941,7 +935,7 @@ u32 svcauth_gss_flavor(struct auth_domain *dom)
 	/* buf->len is the number of bytes from the original start of the
 	 * request to the end, where head[0].iov_len is just the bytes
 	 * not yet read from the head, so these two values are different: */
-	remaining_len = total_buf_len(buf);
+	remaining_len = xdr_buf_msglen(buf);
 	if (priv_len > remaining_len)
 		return -EINVAL;
 	pad = remaining_len - priv_len;
@@ -961,7 +955,7 @@ u32 svcauth_gss_flavor(struct auth_domain *dom)
 	/* XXX: This is very inefficient. It would be better to either do
 	 * this while we encrypt, or maybe in the receive code, if we can peak
 	 * ahead and work out the service and mechanism there. */
-	offset = buf->head[0].iov_len % 4;
+	offset = buf->head[0].iov_len & 3;
 	if (offset) {
 		buf->buflen = RPCSVC_MAXPAYLOAD;
 		xdr_shift_buf(buf, offset);
@@ -1671,12 +1665,30 @@ static void destroy_use_gss_proxy_proc_entry(struct net *net) {}
 	int integ_offset, integ_len;
 	int stat = -EINVAL;
 
+	/* Fill in pad bytes for xdr_buf::pages: */
+	if (resbuf->page_pad) {
+		if (resbuf->tail[0].iov_base != NULL) {
+			memmove((u8 *)resbuf->tail[0].iov_base + sizeof(__be32),
+				resbuf->tail[0].iov_base,
+				resbuf->tail[0].iov_len);
+		} else {
+			resbuf->tail[0].iov_base =
+				(u8 *)resbuf->head[0].iov_base +
+				resbuf->head[0].iov_len;
+			resbuf->tail[0].iov_len = 0;
+		}
+		memset(resbuf->tail[0].iov_base, 0, sizeof(__be32));
+		resbuf->tail[0].iov_base = (u8 *)resbuf->tail[0].iov_base +
+			(4 - resbuf->page_pad);
+		resbuf->tail[0].iov_len += resbuf->page_pad;
+		resbuf->page_pad = 0;
+	}
+
 	p = svcauth_gss_prepare_to_wrap(resbuf, gsd);
 	if (p == NULL)
 		goto out;
 	integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
-	integ_len = resbuf->len - integ_offset;
-	BUG_ON(integ_len % 4);
+	integ_len = xdr_buf_msglen(resbuf) - integ_offset;
 	*p++ = htonl(integ_len);
 	*p++ = htonl(gc->gc_seq);
 	if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset, integ_len)) {
@@ -1716,7 +1728,6 @@ static void destroy_use_gss_proxy_proc_entry(struct net *net) {}
 	struct page **inpages = NULL;
 	__be32 *p, *len;
 	int offset;
-	int pad;
 
 	p = svcauth_gss_prepare_to_wrap(resbuf, gsd);
 	if (p == NULL)
@@ -1735,7 +1746,7 @@ static void destroy_use_gss_proxy_proc_entry(struct net *net) {}
 	 * there is RPC_MAX_AUTH_SIZE slack space available in
 	 * both the head and tail.
 	 */
-	if (resbuf->tail[0].iov_base) {
+	if (resbuf->tail[0].iov_base != NULL) {
 		BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base + PAGE_SIZE);
 		BUG_ON(resbuf->tail[0].iov_base < resbuf->head[0].iov_base);
@@ -1746,6 +1757,7 @@ static void destroy_use_gss_proxy_proc_entry(struct net *net) {}
 			resbuf->tail[0].iov_base,
 			resbuf->tail[0].iov_len);
 		resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE;
+		/* XXX: insert padding for resbuf->pages */
 	}
 	/*
 	 * If there is no current tail data, make sure there
@@ -1754,21 +1766,18 @@ static void destroy_use_gss_proxy_proc_entry(struct net *net) {}
 	 * is RPC_MAX_AUTH_SIZE slack space available in both the
 	 * head and tail.
 	 */
-	if (resbuf->tail[0].iov_base == NULL) {
+	else {
 		if (resbuf->head[0].iov_len + 2*RPC_MAX_AUTH_SIZE > PAGE_SIZE)
 			return -ENOMEM;
 		resbuf->tail[0].iov_base = resbuf->head[0].iov_base
 			+ resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE;
-		resbuf->tail[0].iov_len = 0;
+		memset(resbuf->tail[0].iov_base, 0, sizeof(__be32));
+		resbuf->tail[0].iov_len = resbuf->page_pad;
+		resbuf->page_pad = 0;
 	}
 	if (gss_wrap(gsd->rsci->mechctx, offset, resbuf, inpages))
 		return -ENOMEM;
-	*len = htonl(resbuf->len - offset);
-	pad = 3 - ((resbuf->len - offset - 1)&3);
-	p = (__be32 *)(resbuf->tail[0].iov_base + resbuf->tail[0].iov_len);
-	memset(p, 0, pad);
-	resbuf->tail[0].iov_len += pad;
-	resbuf->len += pad;
+	*len = cpu_to_be32(xdr_buf_msglen(resbuf) - offset);
 	return 0;
 }
 
@@ -1789,7 +1798,7 @@ static void destroy_use_gss_proxy_proc_entry(struct net *net) {}
 	/* normally not set till svc_send, but we need it here: */
 	/* XXX: what for? Do we mess it up the moment we call svc_putu32
 	 * or whatever? */
-	resbuf->len = total_buf_len(resbuf);
+	resbuf->len = xdr_buf_msglen(resbuf);
 	switch (gc->gc_svc) {
 	case RPC_GSS_SVC_NONE:
 		break;
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index d2dadb200024..798ebb406058 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1132,6 +1132,9 @@ void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
 		base -= buf->page_len;
 		subbuf->page_len = 0;
 	}
+	/* XXX: Still need to deal with case where buf->page_pad is non-zero */
+	WARN_ON(buf->page_pad);
+	subbuf->page_pad = 0;
 
 	if (base < buf->tail[0].iov_len) {
 		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
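
Note: xdr_buf_msglen() and the xdr_buf::page_pad field used throughout
this patch are introduced earlier in the series and are not shown in
this excerpt. The call sites above are consistent with the helper
summing all three xdr_buf sections plus the XDR pad bytes still owed
after the page data, but that definition is an assumption. A standalone
sketch with a mocked-up struct:

    #include <stdio.h>
    #include <stddef.h>

    struct kvec { void *iov_base; size_t iov_len; };

    /* Mock of the relevant fields; the real struct xdr_buf lives in
     * include/linux/sunrpc/xdr.h, with page_pad added by this series. */
    struct xdr_buf {
            struct kvec head[1], tail[1];
            unsigned int page_len, page_pad;
    };

    /* Assumed definition: message length counts the head, the page
     * data plus the padding owed for it, and the tail. */
    static inline unsigned int xdr_buf_msglen(const struct xdr_buf *buf)
    {
            return buf->head[0].iov_len + buf->page_len +
                   buf->page_pad + buf->tail[0].iov_len;
    }

    int main(void)
    {
            struct xdr_buf buf = {
                    .head[0].iov_len = 120, .page_len = 13, .page_pad = 3,
            };

            printf("msglen=%u\n", xdr_buf_msglen(&buf));    /* 136 */
            return 0;
    }

Under this reading, xdr_buf_msglen() differs from the removed
total_buf_len() only by the page_pad term, which is what lets the
manual pad bookkeeping in svcauth_gss_wrap_resp_priv() go away.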
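
Note on the alignment arithmetic: "iov_len & 3" computes the same
remainder as "iov_len % 4" for unsigned values, and the deleted pad
expression "3 - ((len - 1) & 3)" is exactly the number of bytes needed
to round len up to the next XDR quad boundary, i.e. the padding that
page_pad now tracks. A quick standalone check:

    #include <assert.h>

    int main(void)
    {
            unsigned int len;

            for (len = 1; len < 4096; len++) {
                    /* The expression removed by this patch: */
                    unsigned int pad = 3 - ((len - 1) & 3);

                    assert((len & 3) == (len % 4));  /* & 3 == % 4 */
                    assert(pad <= 3);                /* never a full quad */
                    assert((len + pad) % 4 == 0);    /* quad-aligned */
            }
            return 0;
    }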
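
The page_pad fix-up added at the top of the integrity wrap path is
easier to follow with concrete numbers. A self-contained mock (not
kernel code; the buffer contents are hypothetical) of the
memmove/memset sequence:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* Say 13 bytes of page data owe 3 XDR pad bytes. */
            unsigned char tailbuf[16] = "abcdefgh"; /* current tail data */
            unsigned char *tail = tailbuf;
            unsigned int tail_len = 8, page_pad = 3;

            /* Open four zeroed bytes ahead of the existing tail... */
            memmove(tail + 4, tail, tail_len);
            memset(tail, 0, 4);

            /* ...then expose only the pad bytes actually owed: the tail
             * now starts with page_pad zeroes, then the old data. */
            tail += 4 - page_pad;
            tail_len += page_pad;
            page_pad = 0;

            /* Prints: tail_len=11 lead=0,0,0 next=a */
            printf("tail_len=%u lead=%d,%d,%d next=%c\n",
                   tail_len, tail[0], tail[1], tail[2], tail[3]);
            return 0;
    }

When there is no tail at all, the corresponding else branch instead
points the tail just past the head data and exposes the page_pad
zeroed bytes there.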