/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

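/*
 * One chunk of the transmit queue: the first @cur entries of @sg hold
 * queued data pages; the final slot is reserved for chaining to the
 * next chunk.
 */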
struct skcipher_sg_list {
	struct list_head list;

	int cur;

	struct scatterlist sg[0];
};

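/*
 * Per-socket state:
 * @tsgl:	list of skcipher_sg_list chunks holding queued input data
 * @rsgl:	scatterlist mapping the user buffer on the synchronous read path
 * @iv:		initialization vector for the next operation
 * @completion:	completion used to wait for synchronous crypto requests
 * @inflight:	number of outstanding asynchronous requests
 * @used:	bytes currently queued in @tsgl
 * @len:	size of this context allocation
 * @more:	MSG_MORE was set, further data is expected
 * @merge:	new data may be appended to the last partially filled page
 * @enc:	true for encryption, false for decryption
 * @req:	cipher request used by the synchronous path
 */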
struct skcipher_ctx {
	struct list_head tsgl;
	struct af_alg_sgl rsgl;

	void *iv;

	struct af_alg_completion completion;

	atomic_t inflight;
	unsigned used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	struct ablkcipher_request req;
};

struct skcipher_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

struct skcipher_async_req {
	struct kiocb *iocb;
	struct skcipher_async_rsgl first_sgl;
	struct list_head list;
	struct scatterlist *tsg;
	char iv[];
};

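/*
 * The AIO path carves a struct skcipher_async_req and an IV copy out of the
 * same allocation as the cipher request.  GET_SREQ recovers the
 * skcipher_async_req from the request pointer; GET_REQ_SIZE and GET_IV_SIZE
 * query the transform's request context and IV sizes.
 */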
#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
	crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req)))

#define GET_REQ_SIZE(ctx) \
	crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req))

#define GET_IV_SIZE(ctx) \
	crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(&ctx->req))

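/*
 * Payload entries per skcipher_sg_list: as many scatterlist slots as fit in
 * 4096 bytes alongside the header, minus one slot reserved for chaining.
 */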
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
		      sizeof(struct scatterlist) - 1)

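/*
 * Release everything attached to an async request: the rx sg lists mapping
 * the user buffer and the page references held by the private tx scatterlist.
 */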
static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
{
	struct skcipher_async_rsgl *rsgl, *tmp;
	struct scatterlist *sgl;
	struct scatterlist *sg;
	int i, n;

	list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &sreq->first_sgl)
			kfree(rsgl);
	}
	sgl = sreq->tsg;
	n = sg_nents(sgl);
	for_each_sg(sgl, sg, n, i)
		put_page(sg_page(sg));

	kfree(sreq->tsg);
}

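/*
 * Completion callback for an async cipher request: drop the inflight count,
 * free the request's sg lists and memory, and complete the user's iocb.
 */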
static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
	struct sock *sk = req->data;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
	struct kiocb *iocb = sreq->iocb;

	atomic_dec(&ctx->inflight);
	skcipher_free_async_sgls(sreq);
	kfree(req);
	iocb->ki_complete(iocb, err, err);
}

static inline int skcipher_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool skcipher_writable(struct sock *sk)
{
	return PAGE_SIZE <= skcipher_sndbuf(sk);
}

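/*
 * Make sure the tail of the tx queue has a free scatterlist slot; allocate
 * and chain a new skcipher_sg_list when the current one is full.
 */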
static int skcipher_alloc_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg = NULL;

	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
	if (!list_empty(&ctx->tsgl))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		sgl = sock_kmalloc(sk, sizeof(*sgl) +
				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		if (sg)
			scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

		list_add_tail(&sgl->list, &ctx->tsgl);
	}

	return 0;
}

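/*
 * Consume @used bytes from the front of the tx queue, optionally dropping
 * the page references (@put), and free fully consumed sg lists.
 */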
static void skcipher_pull_sgl(struct sock *sk, int used, int put)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int i;

	while (!list_empty(&ctx->tsgl)) {
		sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			int plen = min_t(int, used, sg[i].length);

			if (!sg_page(sg + i))
				continue;

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			if (sg[i].length)
				return;
			if (put)
				put_page(sg_page(sg + i));
			sg_assign_page(sg + i, NULL);
		}

		list_del(&sgl->list);
		sock_kfree_s(sk, sgl,
			     sizeof(*sgl) + sizeof(sgl->sg[0]) *
					    (MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}

static void skcipher_free_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	skcipher_pull_sgl(sk, ctx->used, 1);
}

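/*
 * Block until at least a page of send buffer space is available, unless
 * MSG_DONTWAIT is set or a signal arrives.
 */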
static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	return err;
}

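/* Wake up writers once send buffer space has become available again. */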
static void skcipher_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!skcipher_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

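/*
 * Block until user space has queued data to process, unless MSG_DONTWAIT is
 * set or a signal arrives.
 */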
static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, ctx->used)) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	return err;
}

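/*
 * Wake up anyone waiting for queued data to process; does nothing if the
 * queue is empty.
 */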
static void skcipher_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

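/*
 * Queue data for encryption or decryption.  The first sendmsg of an
 * operation may carry ALG_SET_OP and ALG_SET_IV control messages; the
 * payload is copied into pages on the tx queue, appending to the last
 * partially filled page where possible.
 */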
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	struct skcipher_sg_list *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err;
	int i;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	err = -EINVAL;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);
	}

	while (size) {
		struct scatterlist *sg;
		unsigned long len = size;
		int plen;

		if (ctx->merge) {
			sgl = list_entry(ctx->tsgl.prev,
					 struct skcipher_sg_list, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!skcipher_writable(sk)) {
			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		len = min_t(unsigned long, len, skcipher_sndbuf(sk));

		err = skcipher_alloc_sgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
		sg = sgl->sg;
		sg_unmark_end(sg + sgl->cur);
		do {
			i = sgl->cur;
			plen = min_t(int, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg + i))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}

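/*
 * Zero-copy variant of skcipher_sendmsg(): take a reference on the caller's
 * page and link it directly into the tx scatterlist.
 */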
static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
				 int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!skcipher_writable(sk)) {
		err = skcipher_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = skcipher_alloc_sgl(sk);
	if (err)
		goto unlock;

	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);
	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

done:
	ctx->more = flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
{
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int nents = 0;

	list_for_each_entry(sgl, &ctx->tsgl, list) {
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		nents += sg_nents(sg);
	}
	return nents;
}

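/*
 * AIO read path: move the queued tx pages into a self-contained request,
 * map the user buffer as the rx scatterlist and start the cipher.  If the
 * operation remains in flight, return -EIOCBQUEUED and finish in
 * skcipher_async_cb().
 */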
static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
				  int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	struct skcipher_async_req *sreq;
	struct ablkcipher_request *req;
	struct skcipher_async_rsgl *last_rsgl = NULL;
	unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
	unsigned int reqlen = sizeof(struct skcipher_async_req) +
				GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
	int err = -ENOMEM;
	bool mark = false;

	lock_sock(sk);
	req = kmalloc(reqlen, GFP_KERNEL);
	if (unlikely(!req))
		goto unlock;

	sreq = GET_SREQ(req, ctx);
	sreq->iocb = msg->msg_iocb;
	memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
	INIT_LIST_HEAD(&sreq->list);
	sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
	if (unlikely(!sreq->tsg)) {
		kfree(req);
		goto unlock;
	}
	sg_init_table(sreq->tsg, tx_nents);
	memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
	ablkcipher_request_set_tfm(req, crypto_ablkcipher_reqtfm(&ctx->req));
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					skcipher_async_cb, sk);

	while (iov_iter_count(&msg->msg_iter)) {
		struct skcipher_async_rsgl *rsgl;
		int used;

		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto free;
		}
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		used = min_t(unsigned long, ctx->used,
			     iov_iter_count(&msg->msg_iter));
		used = min_t(unsigned long, used, sg->length);

		if (txbufs == tx_nents) {
			struct scatterlist *tmp;
			int x;
			/*
			 * Ran out of tx slots in the async request;
			 * need to expand.
			 */
			tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
				      GFP_KERNEL);
			if (!tmp)
				goto free;

			sg_init_table(tmp, tx_nents * 2);
			for (x = 0; x < tx_nents; x++)
				sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
					    sreq->tsg[x].length,
					    sreq->tsg[x].offset);
			kfree(sreq->tsg);
			sreq->tsg = tmp;
			tx_nents *= 2;
			mark = true;
		}
		/*
		 * Take over the tx sgl from ctx into the async request;
		 * these sgls will be freed later.
		 */
		sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
			    sg->offset);

		if (list_empty(&sreq->list)) {
			rsgl = &sreq->first_sgl;
			list_add_tail(&rsgl->list, &sreq->list);
		} else {
			rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
			if (!rsgl) {
				err = -ENOMEM;
				goto free;
			}
			list_add_tail(&rsgl->list, &sreq->list);
		}

		used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
		err = used;
		if (used < 0)
			goto free;
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;
		len += used;
		skcipher_pull_sgl(sk, used, 0);
		iov_iter_advance(&msg->msg_iter, used);
	}

	if (mark)
		sg_mark_end(sreq->tsg + txbufs - 1);

	ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
				     len, sreq->iv);
	err = ctx->enc ? crypto_ablkcipher_encrypt(req) :
			 crypto_ablkcipher_decrypt(req);
	if (err == -EINPROGRESS) {
		atomic_inc(&ctx->inflight);
		err = -EIOCBQUEUED;
		goto unlock;
	}
free:
	skcipher_free_async_sgls(sreq);
	kfree(req);
unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);
	return err;
}

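/*
 * Synchronous read path: map the user buffer, run the cipher over the
 * queued tx data and wait for completion.  When more data is expected, or
 * only part of the queue is consumed, the length is rounded down to the
 * cipher block size.
 */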
static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
				 int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
		&ctx->req));
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int err = -EAGAIN;
	int used;
	long copied = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto unlock;
		}

		used = min_t(unsigned long, ctx->used, msg_data_left(msg));

		used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
		err = used;
		if (err < 0)
			goto unlock;

		if (ctx->more || used < ctx->used)
			used -= used % bs;

		err = -EINVAL;
		if (!used)
			goto free;

		ablkcipher_request_set_crypt(&ctx->req, sg,
					     ctx->rsgl.sg, used,
					     ctx->iv);

		err = af_alg_wait_for_completion(
				ctx->enc ?
					crypto_ablkcipher_encrypt(&ctx->req) :
					crypto_ablkcipher_decrypt(&ctx->req),
				&ctx->completion);

free:
		af_alg_free_sg(&ctx->rsgl);

		if (err)
			goto unlock;

		copied += used;
		skcipher_pull_sgl(sk, used, 1);
		iov_iter_advance(&msg->msg_iter, used);
	}

	err = 0;

unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}

static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t ignored, int flags)
{
	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
		skcipher_recvmsg_async(sock, msg, flags) :
		skcipher_recvmsg_sync(sock, msg, flags);
}

static unsigned int skcipher_poll(struct file *file, struct socket *sock,
				  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (ctx->used)
		mask |= POLLIN | POLLRDNORM;

	if (skcipher_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_skcipher_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg,
	.sendpage	=	skcipher_sendpage,
	.recvmsg	=	skcipher_recvmsg,
	.poll		=	skcipher_poll,
};

static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_ablkcipher(name, type, mask);
}

static void skcipher_release(void *private)
{
	crypto_free_ablkcipher(private);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_ablkcipher_setkey(private, key, keylen);
}

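/* Wait up to ~10 seconds for outstanding async requests to finish. */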
static void skcipher_wait(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	int ctr = 0;

	while (atomic_read(&ctx->inflight) && ctr++ < 100)
		msleep(100);
}

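/*
 * Socket destructor: wait for async requests, free the tx queue, IV and
 * context, then drop the reference on the parent (tfm-holding) socket.
 */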
static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);

	if (atomic_read(&ctx->inflight))
		skcipher_wait(sk);

	skcipher_free_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

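/*
 * Set up the per-socket context when an operation socket is accepted on a
 * bound transform: allocate the context and IV and initialise the
 * synchronous cipher request.
 */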
static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct skcipher_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));

	INIT_LIST_HEAD(&ctx->tsgl);
	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	atomic_set(&ctx->inflight, 0);
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;

	ablkcipher_request_set_tfm(&ctx->req, private);
	ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					af_alg_complete, &ctx->completion);

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}

static const struct af_alg_type algif_type_skcipher = {
	.bind		=	skcipher_bind,
	.release	=	skcipher_release,
	.setkey		=	skcipher_setkey,
	.accept		=	skcipher_accept_parent,
	.ops		=	&algif_skcipher_ops,
	.name		=	"skcipher",
	.owner		=	THIS_MODULE
};

static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);
	BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_LICENSE("GPL");