/*
 *	lib/dma-noop.c
 *
 * Simple DMA noop-ops that map 1:1 with memory
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Allocate plain pages; the DMA handle is simply the physical address. */
static void *dma_noop_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t gfp,
			    unsigned long attrs)
{
	void *ret;

	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (ret)
		*dma_handle = virt_to_phys(ret);
	return ret;
}

static void dma_noop_free(struct device *dev, size_t size,
			  void *cpu_addr, dma_addr_t dma_addr,
			  unsigned long attrs)
{
	free_pages((unsigned long)cpu_addr, get_order(size));
}

/* No IOMMU, no bounce buffering: DMA address == physical address. */
static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page,
				    unsigned long offset, size_t size,
				    enum dma_data_direction dir,
				    unsigned long attrs)
{
	return page_to_phys(page) + offset;
}

/* Translate each scatterlist entry 1:1 to its physical address. */
static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction dir,
			   unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		void *va;

		BUG_ON(!sg_page(sg));
		va = sg_virt(sg);
		sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va);
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}

/* Mapping can never fail, and any DMA mask is acceptable. */
static int dma_noop_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

static int dma_noop_supported(struct device *dev, u64 mask)
{
	return 1;
}

struct dma_map_ops dma_noop_ops = {
	.alloc			= dma_noop_alloc,
	.free			= dma_noop_free,
	.map_page		= dma_noop_map_page,
	.map_sg			= dma_noop_map_sg,
	.mapping_error		= dma_noop_mapping_error,
	.dma_supported		= dma_noop_supported,
};

EXPORT_SYMBOL(dma_noop_ops);
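Taken together, these ops make the generic DMA API degenerate to plain physical addressing. A minimal sketch of what that looks like from a driver's point of view (hypothetical code, not part of lib/dma-noop.c; my_dev and the buffer sizes are illustrative, and it assumes the device's bus has installed dma_noop_ops):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Hypothetical demo: with dma_noop_ops installed for this device,
 * the generic DMA API calls below reduce to the trivial 1:1
 * physical mappings implemented above.
 */
static int dma_noop_usage_demo(struct device *my_dev)
{
	dma_addr_t coherent_handle, stream_handle;
	void *coherent;
	void *buf;

	/* dma_alloc_coherent() dispatches to dma_noop_alloc(): a plain
	 * page allocation whose DMA handle is just virt_to_phys(). */
	coherent = dma_alloc_coherent(my_dev, PAGE_SIZE, &coherent_handle,
				      GFP_KERNEL);
	if (!coherent)
		return -ENOMEM;

	buf = kmalloc(512, GFP_KERNEL);
	if (!buf) {
		dma_free_coherent(my_dev, PAGE_SIZE, coherent,
				  coherent_handle);
		return -ENOMEM;
	}

	/* dma_map_single() dispatches to dma_noop_map_page(), so the
	 * returned DMA address equals the buffer's physical address. */
	stream_handle = dma_map_single(my_dev, buf, 512, DMA_TO_DEVICE);

	/* ... program the device with stream_handle ... */

	/* Unmap/free are no-ops or plain frees for these noop ops. */
	dma_unmap_single(my_dev, stream_handle, 512, DMA_TO_DEVICE);
	kfree(buf);
	dma_free_coherent(my_dev, PAGE_SIZE, coherent, coherent_handle);
	return 0;
}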
author		Ard Biesheuvel <ard.biesheuvel@linaro.org>	2017-01-17 13:46:29 +0000
committer	Herbert Xu <herbert@gondor.apana.org.au>	2017-01-23 22:41:33 +0800
commit		11e3b725cfc282efe9d4a354153e99d86a16af08 (patch)
tree		8b5b9e0e1bcae1ab98ee652ffb7b13b05c209bd6 /tools/thermal/tmon/tmon.c
parent		d6040764adcb5cb6de1489422411d701c158bb69 (diff)
crypto: arm64/aes-blk - honour iv_out requirement in CBC and CTR modes
Update the ARMv8 Crypto Extensions and the plain NEON AES implementations
in CBC and CTR modes to return the next IV back to the skcipher API
client. This is necessary for chaining to work correctly. Note that for
CTR, this is only done if the request is a round multiple of the block
size, since otherwise, chaining is impossible anyway.

Cc: <stable@vger.kernel.org> # v3.16+
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
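For context, the skcipher contract this patch honours is that the IV buffer passed in a request is updated in place to the IV for the block that follows, so two back-to-back requests must produce the same result as one request over the whole buffer. A minimal sketch of such chaining (hypothetical caller code, not part of the patch; the key handling, buffer sizes, and synchronous-only allocation are illustrative assumptions):

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Hypothetical sketch: encrypt 32 bytes as two chained 16-byte CBC
 * requests. Correct chaining relies on the driver writing the next
 * IV back into 'iv' after each request completes, which is exactly
 * what this patch fixes for the arm64 AES implementations.
 */
static int cbc_chain_demo(const u8 *key, unsigned int keylen,
			  u8 *buf /* 32 bytes, encrypted in place */,
			  u8 *iv  /* 16 bytes */)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	int err;

	/* Request a synchronous cbc(aes) transform. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}
	skcipher_request_set_callback(req, 0, NULL, NULL);

	/* First block: 'iv' holds the initial IV. */
	sg_init_one(&sg, buf, 16);
	skcipher_request_set_crypt(req, &sg, &sg, 16, iv);
	err = crypto_skcipher_encrypt(req);
	if (err)
		goto out_req;

	/* Second block: 'iv' must now contain the next IV (the last
	 * ciphertext block, for CBC). A driver that fails to write it
	 * back would make this differ from one 32-byte request. */
	sg_init_one(&sg, buf + 16, 16);
	skcipher_request_set_crypt(req, &sg, &sg, 16, iv);
	err = crypto_skcipher_encrypt(req);

out_req:
	skcipher_request_free(req);
out:
	crypto_free_skcipher(tfm);
	return err;
}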