crypto/lzo.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/lzo.h>
#include <crypto/internal/scompress.h>
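
/*
 * Per-tfm context: scratch memory for the LZO1X-1 compressor
 * (LZO1X_MEM_COMPRESS bytes). The safe decompressor needs no workspace.
 */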
struct lzo_ctx {
	void *lzo_comp_mem;
};

static void *lzo_alloc_ctx(struct crypto_scomp *tfm)
{
	void *ctx;

	/*
	 * kvmalloc() tries kmalloc() first and falls back to vmalloc(),
	 * which is the conservative choice for a workspace this large:
	 * it avoids pushing the page allocator into aggressive reclaim
	 * when the vmalloc fallback is perfectly serviceable.
	 */
	ctx = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	return ctx;
}
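
/*
 * cra_init/cra_exit hooks for the legacy crypto_alg interface; they
 * reuse the scomp allocator and destructor so both interfaces share a
 * single allocation path.
 */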
static int lzo_init(struct crypto_tfm *tfm)
{
	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->lzo_comp_mem = lzo_alloc_ctx(NULL);
	if (IS_ERR(ctx->lzo_comp_mem))
		return -ENOMEM;

	return 0;
}

static void lzo_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
	kvfree(ctx);
}

static void lzo_exit(struct crypto_tfm *tfm)
{
	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);

	lzo_free_ctx(NULL, ctx->lzo_comp_mem);
}

static int __lzo_compress(const u8 *src, unsigned int slen,
			  u8 *dst, unsigned int *dlen, void *ctx)
{
	size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
	int err;

	err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx);

	if (err != LZO_E_OK)
		return -EINVAL;

	*dlen = tmp_len;
	return 0;
}
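
/*
 * Both the legacy compress entry point and the scomp entry point funnel
 * into __lzo_compress(); they differ only in where the workspace comes
 * from (the tfm context vs. a caller-supplied scomp ctx).
 */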
static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
			unsigned int slen, u8 *dst, unsigned int *dlen)
{
	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);

	return __lzo_compress(src, slen, dst, dlen, ctx->lzo_comp_mem);
}

static int lzo_scompress(struct crypto_scomp *tfm, const u8 *src,
			 unsigned int slen, u8 *dst, unsigned int *dlen,
			 void *ctx)
{
	return __lzo_compress(src, slen, dst, dlen, ctx);
}

static int __lzo_decompress(const u8 *src, unsigned int slen,
			    u8 *dst, unsigned int *dlen)
{
	int err;
	size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */

	err = lzo1x_decompress_safe(src, slen, dst, &tmp_len);

	if (err != LZO_E_OK)
		return -EINVAL;

	*dlen = tmp_len;
	return 0;
}
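
/*
 * lzo1x_decompress_safe() validates against both the input and output
 * lengths, so malformed input fails with -EINVAL instead of overrunning
 * the destination buffer.
 */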
static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
			  unsigned int slen, u8 *dst, unsigned int *dlen)
{
	return __lzo_decompress(src, slen, dst, dlen);
}

static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src,
			   unsigned int slen, u8 *dst, unsigned int *dlen,
			   void *ctx)
{
	return __lzo_decompress(src, slen, dst, dlen);
}
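
/*
 * Legacy compression algorithm descriptor; reachable through
 * crypto_alloc_comp("lzo", 0, 0).
 */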
static struct crypto_alg alg = {
	.cra_name		= "lzo",
	.cra_driver_name	= "lzo-generic",
	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
	.cra_ctxsize		= sizeof(struct lzo_ctx),
	.cra_module		= THIS_MODULE,
	.cra_init		= lzo_init,
	.cra_exit		= lzo_exit,
	.cra_u			= { .compress = {
	.coa_compress		= lzo_compress,
	.coa_decompress		= lzo_decompress } }
};
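
/*
 * scomp (synchronous compression) descriptor; the crypto core exposes
 * this to acomp API users via the scomp-to-acomp wrapper.
 */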
static struct scomp_alg scomp = {
	.alloc_ctx		= lzo_alloc_ctx,
	.free_ctx		= lzo_free_ctx,
	.compress		= lzo_scompress,
	.decompress		= lzo_sdecompress,
	.base			= {
		.cra_name	= "lzo",
		.cra_driver_name = "lzo-scomp",
		.cra_module	 = THIS_MODULE,
	}
};
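
/*
 * Register both interfaces; if scomp registration fails, unwind the
 * legacy registration so the module loads all-or-nothing.
 */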
static int __init lzo_mod_init(void)
{
	int ret;

	ret = crypto_register_alg(&alg);
	if (ret)
		return ret;

	ret = crypto_register_scomp(&scomp);
	if (ret) {
		crypto_unregister_alg(&alg);
		return ret;
	}

	return ret;
}

static void __exit lzo_mod_fini(void)
{
	crypto_unregister_alg(&alg);
	crypto_unregister_scomp(&scomp);
}

subsys_initcall(lzo_mod_init);
module_exit(lzo_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LZO Compression Algorithm");
MODULE_ALIAS_CRYPTO("lzo");
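
/*
 * Usage sketch (illustration only, not part of the original file): a
 * minimal in-kernel caller of the legacy interface registered above.
 * The buffer names and sizes (src, slen, dst, dst_size) are hypothetical.
 *
 *	struct crypto_comp *cc = crypto_alloc_comp("lzo", 0, 0);
 *	unsigned int dlen = dst_size;	// capacity of dst on input
 *
 *	if (!IS_ERR(cc)) {
 *		int err = crypto_comp_compress(cc, src, slen, dst, &dlen);
 *		// on success, dlen now holds the compressed length
 *		crypto_free_comp(cc);
 *	}
 */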