@@ -42,38 +42,24 @@ struct skcipher_walk_buffer {
static int skcipher_walk_next(struct skcipher_walk *walk);

-static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
-{
- if (PageHighMem(scatterwalk_page(walk)))
- kunmap_atomic(vaddr);
-}
-
-static inline void *skcipher_map(struct scatter_walk *walk)
-{
- struct page *page = scatterwalk_page(walk);
-
- return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
- offset_in_page(walk->offset);
-}
-
static inline void skcipher_map_src(struct skcipher_walk *walk)
{
- walk->src.virt.addr = skcipher_map(&walk->in);
+ walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
- walk->dst.virt.addr = skcipher_map(&walk->out);
+ walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
- skcipher_unmap(&walk->in, walk->src.virt.addr);
+ scatterwalk_unmap(walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
- skcipher_unmap(&walk->out, walk->dst.virt.addr);
+ scatterwalk_unmap(walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
The skcipher walk API implementation avoids scatterwalk_map() for
mapping the source and destination buffers, and invokes kmap_atomic()
directly if the buffer in question is not in low memory (which can only
happen on 32-bit architectures). This avoids some overhead on 64-bit
architectures, and most notably, permits the skcipher code to run with
preemption enabled.

Now that scatterwalk_map() has been updated to use kmap_local(), none
of this is needed, so we can simply use scatterwalk_map/unmap instead.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 crypto/skcipher.c | 22 ++++------------------
 1 file changed, 4 insertions(+), 18 deletions(-)
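
For reference, after the kmap_local() conversion mentioned above, the
scatterwalk helpers reduce to roughly the following (a sketch of the
assumed shape, not necessarily the verbatim definitions in
include/crypto/scatterwalk.h):

/*
 * Sketch of the scatterwalk helpers once based on kmap_local_page().
 * kmap_local_page() amounts to a plain address computation for lowmem
 * pages (and on !CONFIG_HIGHMEM configurations such as 64-bit), and it
 * does not disable preemption, so the open-coded PageHighMem() /
 * kmap_atomic() special case removed by this patch buys nothing.
 */
static inline void *scatterwalk_map(struct scatter_walk *walk)
{
	return kmap_local_page(scatterwalk_page(walk)) +
	       offset_in_page(walk->offset);
}

static inline void scatterwalk_unmap(void *vaddr)
{
	kunmap_local(vaddr);
}

Note that kunmap_local() takes only the mapped address, which is why
the skcipher_unmap_src/dst wrappers no longer need to pass the
corresponding scatter_walk.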