This avoids bug-prone open-coding of the sg offset handling and also helps
to document the limitations of mapping scatterlist entries.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 include/linux/scatterlist.h | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index b96f0d0b5b8f..524cd8448a48 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -2,6 +2,7 @@
 #ifndef _LINUX_SCATTERLIST_H
 #define _LINUX_SCATTERLIST_H
 
+#include <linux/highmem.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/bug.h>
@@ -239,6 +240,31 @@ static inline void *sg_virt(struct scatterlist *sg)
 	return page_address(sg_page(sg)) + sg->offset;
 }
 
+/**
+ * sg_kmap_atomic - map a S/G list entry to a kernel address
+ * @sg: scatterlist entry
+ *
+ * Return a kernel address for a scatterlist entry by kmapping it. Note that
+ * this function must only be called on scatterlist entries that do not span
+ * multiple pages.
+ */
+static inline void *sg_kmap_atomic(struct scatterlist *sg)
+{
+	if (WARN_ON_ONCE(sg->offset + sg->length > PAGE_SIZE))
+		return NULL;
+	return kmap_atomic(sg_page(sg)) + sg->offset;
+}
+
+/**
+ * sg_kunmap_atomic - unmap a S/G list entry mapped with sg_kmap_atomic
+ * @sg: scatterlist entry
+ * @ptr: address returned from sg_kmap_atomic
+ */
+static inline void sg_kunmap_atomic(struct scatterlist *sg, void *ptr)
+{
+	kunmap_atomic(ptr - sg->offset);
+}
+
 /**
  * sg_init_marker - Initialize markers in sg table
  * @sgl: The SG table
--
2.20.1
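
For illustration, a minimal sketch of a caller; the helper name
sg_zero_entry() is hypothetical and not part of this patch. It assumes the
entry fits in a single page, which sg_kmap_atomic() now verifies, and that
the caller does not sleep between map and unmap, as usual for
kmap_atomic() mappings:

/*
 * Hypothetical example, not part of this patch: zero the data
 * covered by a single-page scatterlist entry.
 */
static void sg_zero_entry(struct scatterlist *sg)
{
	void *buf = sg_kmap_atomic(sg);

	if (!buf)
		return;	/* entry spans multiple pages, already WARNed */
	memset(buf, 0, sg->length);
	sg_kunmap_atomic(sg, buf);
}

Passing the entry to sg_kunmap_atomic() lets it subtract sg->offset again,
so kunmap_atomic() sees the page-aligned address that kmap_atomic()
originally returned.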