mirror of
https://github.com/torvalds/linux.git
synced 2024-12-26 04:42:12 +00:00
crypto: adiantum - flush destination page before unmapping
Upon additional review, the new fast path in adiantum_finish() is
missing the call to flush_dcache_page() that scatterwalk_map_and_copy()
was doing. It's apparently debatable whether flush_dcache_page() is
actually needed, as per the discussion at
https://lore.kernel.org/lkml/YYP1lAq46NWzhOf0@casper.infradead.org/T/#u.
However, it appears that currently all the helper functions that write
to a page, such as scatterwalk_map_and_copy(), memcpy_to_page(), and
memzero_page(), do the dcache flush. So do it to be consistent.
Fixes: dadf5e56c9 ("crypto: adiantum - add fast path for single-page messages")
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
parent
b030c45844
commit
a312e07a65
@@ -300,7 +300,8 @@ static int adiantum_finish(struct skcipher_request *req)
 	le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
 	if (dst_nents == 1 && dst->offset + req->cryptlen <= PAGE_SIZE) {
 		/* Fast path for single-page destination */
-		void *virt = kmap_local_page(sg_page(dst)) + dst->offset;
+		struct page *page = sg_page(dst);
+		void *virt = kmap_local_page(page) + dst->offset;
 
 		err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
 					  (u8 *)&digest);
@@ -310,6 +311,7 @@ static int adiantum_finish(struct skcipher_request *req)
 		}
 		le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
 		memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128));
+		flush_dcache_page(page);
 		kunmap_local(virt);
 	} else {
 		/* Slow path that works for any destination scatterlist */
Loading…
Reference in New Issue
Block a user