---
 net/sunrpc/xprtrdma/svc_rdma_rw.c     | 13 +++++++++++++
 net/sunrpc/xprtrdma/svc_rdma_sendto.c | 27 +++++++++++++++++++--------
 2 files changed, 32 insertions(+), 8 deletions(-)

diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 467d40a1dffa..a7fb886ea136 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -14,6 +14,8 @@
 #include "xprt_rdma.h"
 #include <trace/events/rpcrdma.h>
 
+static const __be32 xdr_padding = xdr_zero;
+
 static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
 static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
 
@@ -559,6 +561,9 @@ int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
 {
 	struct svc_rdma_write_info *info;
 	int consumed, ret;
+	struct kvec pad = {
+		.iov_base = (void *)&xdr_padding,
+	};
 
 	info = svc_rdma_write_info_alloc(rdma, rp_ch);
 	if (!info)
@@ -577,6 +582,14 @@ int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
 		if (ret < 0)
 			goto out_err;
 		consumed += xdr->page_len;
+
+		if (xdr->page_pad) {
+			pad.iov_len = xdr->page_pad;
+			ret = svc_rdma_send_xdr_kvec(info, &pad);
+			if (ret < 0)
+				goto out_err;
+			consumed += pad.iov_len;
+		}
 	}
 
 	if (xdr->tail[0].iov_len) {
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 33f817519964..d0f9acfe60a6 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -112,6 +112,8 @@
 #include "xprt_rdma.h"
 #include <trace/events/rpcrdma.h>
 
+static const __be32 xdr_padding = xdr_zero;
+
 static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);
 
 static inline struct svc_rdma_send_ctxt *
@@ -320,11 +322,6 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
 	return ret;
 }
 
-static u32 xdr_padsize(u32 len)
-{
-	return (len & 3) ? (4 - (len & 3)) : 0;
-}
-
 /* Returns length of transport header, in bytes.
  */
 static unsigned int svc_rdma_reply_hdr_len(__be32 *rdma_resp)
@@ -561,6 +558,8 @@ static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
 					   remaining);
 			pageoff = 0;
 		}
+		if (xdr->page_pad)
+			++elements;
 	}
 
 	/* xdr->tail */
@@ -593,7 +592,7 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
 	if (wr_lst) {
 		u32 xdrpad;
 
-		xdrpad = xdr_padsize(xdr->page_len);
+		xdrpad = xdr_pad_size(xdr->page_len);
 		if (taillen && xdrpad) {
 			tailbase += xdrpad;
 			taillen -= xdrpad;
@@ -614,12 +613,16 @@ static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
 			dst += len;
 			pageoff = 0;
 		}
+		if (xdr->page_pad) {
+			memcpy(dst, &xdr_padding, xdr->page_pad);
+			dst += xdr->page_pad;
+		}
 	}
 
 	if (taillen)
 		memcpy(dst, tailbase, taillen);
 
-	ctxt->sc_sges[0].length += xdr->len;
+	ctxt->sc_sges[0].length += xdr_buf_msglen(xdr);
 	ib_dma_sync_single_for_device(rdma->sc_pd->device,
 				      ctxt->sc_sges[0].addr,
 				      ctxt->sc_sges[0].length,
@@ -668,7 +671,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
 	if (wr_lst) {
 		base = xdr->tail[0].iov_base;
 		len = xdr->tail[0].iov_len;
-		xdr_pad = xdr_padsize(xdr->page_len);
+		xdr_pad = xdr_pad_size(xdr->page_len);
 
 		if (len && xdr_pad) {
 			base += xdr_pad;
@@ -693,6 +696,14 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
 		remaining -= len;
 		page_off = 0;
 	}
+	if (xdr->page_pad) {
+		++ctxt->sc_cur_sge_no;
+		ret = svc_rdma_dma_map_buf(rdma, ctxt,
+					   (unsigned char *)&xdr_padding,
+					   xdr->page_pad);
+		if (ret < 0)
+			return ret;
+	}
 
 	base = xdr->tail[0].iov_base;
 	len = xdr->tail[0].iov_len;