dm-crypt: remove per-cpu structure

Remove the per-cpu structure and make it per-convert_context instead.
This allows moving requests between different CPUs.

CRs-Fixed: 339113
Change-Id: Iadc618920c7bdec0b8bbe2accdce1b69dbce5582
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Shashank Mittal <mittals@codeaurora.org>
Commit: d4c4e6a028
Parent: 9beb8dff9f
Committed by: Stephen Boyd
@@ -18,7 +18,6 @@
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/backing-dev.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <asm/page.h>
|
||||
@@ -44,6 +43,7 @@ struct convert_context {
|
||||
unsigned int idx_out;
|
||||
sector_t sector;
|
||||
atomic_t pending;
|
||||
struct ablkcipher_request *req;
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -104,16 +104,8 @@ struct iv_lmk_private {
|
||||
*/
|
||||
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
|
||||
|
||||
/*
|
||||
* Duplicated per-CPU state for cipher.
|
||||
*/
|
||||
struct crypt_cpu {
|
||||
struct ablkcipher_request *req;
|
||||
};
|
||||
|
||||
/*
|
||||
* The fields in here must be read only after initialization,
|
||||
* changing state should be in crypt_cpu.
|
||||
*/
|
||||
struct crypt_config {
|
||||
struct dm_dev *dev;
|
||||
@@ -143,12 +135,6 @@ struct crypt_config {
|
||||
sector_t iv_offset;
|
||||
unsigned int iv_size;
|
||||
|
||||
/*
|
||||
* Duplicated per cpu state. Access through
|
||||
* per_cpu_ptr() only.
|
||||
*/
|
||||
struct crypt_cpu __percpu *cpu;
|
||||
|
||||
/* ESSIV: struct crypto_cipher *essiv_tfm */
|
||||
void *iv_private;
|
||||
struct crypto_ablkcipher **tfms;
|
||||
@@ -184,11 +170,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *);
|
||||
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
|
||||
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
|
||||
|
||||
static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
|
||||
{
|
||||
return this_cpu_ptr(cc->cpu);
|
||||
}
|
||||
|
||||
/*
|
||||
* Use this to access cipher attributes that are the same for each CPU.
|
||||
*/
|
||||
@@ -740,16 +721,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
|
||||
static void crypt_alloc_req(struct crypt_config *cc,
|
||||
struct convert_context *ctx)
|
||||
{
|
||||
struct crypt_cpu *this_cc = this_crypt_config(cc);
|
||||
unsigned key_index = ctx->sector & (cc->tfms_count - 1);
|
||||
|
||||
if (!this_cc->req)
|
||||
this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
|
||||
if (!ctx->req)
|
||||
ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
|
||||
|
||||
ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
|
||||
ablkcipher_request_set_callback(this_cc->req,
|
||||
ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
|
||||
ablkcipher_request_set_callback(ctx->req,
|
||||
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
|
||||
kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
|
||||
kcryptd_async_done, dmreq_of_req(cc, ctx->req));
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -758,7 +738,6 @@ static void crypt_alloc_req(struct crypt_config *cc,
|
||||
static int crypt_convert(struct crypt_config *cc,
|
||||
struct convert_context *ctx)
|
||||
{
|
||||
struct crypt_cpu *this_cc = this_crypt_config(cc);
|
||||
int r;
|
||||
|
||||
atomic_set(&ctx->pending, 1);
|
||||
@@ -770,7 +749,7 @@ static int crypt_convert(struct crypt_config *cc,
|
||||
|
||||
atomic_inc(&ctx->pending);
|
||||
|
||||
r = crypt_convert_block(cc, ctx, this_cc->req);
|
||||
r = crypt_convert_block(cc, ctx, ctx->req);
|
||||
|
||||
switch (r) {
|
||||
/* async */
|
||||
@@ -779,7 +758,7 @@ static int crypt_convert(struct crypt_config *cc,
|
||||
INIT_COMPLETION(ctx->restart);
|
||||
/* fall through*/
|
||||
case -EINPROGRESS:
|
||||
this_cc->req = NULL;
|
||||
ctx->req = NULL;
|
||||
ctx->sector++;
|
||||
continue;
|
||||
|
||||
@@ -888,6 +867,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
|
||||
io->sector = sector;
|
||||
io->error = 0;
|
||||
io->base_io = NULL;
|
||||
io->ctx.req = NULL;
|
||||
atomic_set(&io->pending, 0);
|
||||
|
||||
return io;
|
||||
@@ -913,6 +893,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
|
||||
if (!atomic_dec_and_test(&io->pending))
|
||||
return;
|
||||
|
||||
if (io->ctx.req)
|
||||
mempool_free(io->ctx.req, cc->req_pool);
|
||||
mempool_free(io, cc->io_pool);
|
||||
|
||||
if (likely(!base_io))
|
||||
@@ -1358,8 +1340,6 @@ static int crypt_wipe_key(struct crypt_config *cc)
|
||||
static void crypt_dtr(struct dm_target *ti)
|
||||
{
|
||||
struct crypt_config *cc = ti->private;
|
||||
struct crypt_cpu *cpu_cc;
|
||||
int cpu;
|
||||
|
||||
ti->private = NULL;
|
||||
|
||||
@@ -1371,13 +1351,6 @@ static void crypt_dtr(struct dm_target *ti)
|
||||
if (cc->crypt_queue)
|
||||
destroy_workqueue(cc->crypt_queue);
|
||||
|
||||
if (cc->cpu)
|
||||
for_each_possible_cpu(cpu) {
|
||||
cpu_cc = per_cpu_ptr(cc->cpu, cpu);
|
||||
if (cpu_cc->req)
|
||||
mempool_free(cpu_cc->req, cc->req_pool);
|
||||
}
|
||||
|
||||
crypt_free_tfms(cc);
|
||||
|
||||
if (cc->bs)
|
||||
@@ -1396,9 +1369,6 @@ static void crypt_dtr(struct dm_target *ti)
|
||||
if (cc->dev)
|
||||
dm_put_device(ti, cc->dev);
|
||||
|
||||
if (cc->cpu)
|
||||
free_percpu(cc->cpu);
|
||||
|
||||
kzfree(cc->cipher);
|
||||
kzfree(cc->cipher_string);
|
||||
|
||||
@@ -1453,13 +1423,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
|
||||
if (tmp)
|
||||
DMWARN("Ignoring unexpected additional cipher options");
|
||||
|
||||
cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
|
||||
__alignof__(struct crypt_cpu));
|
||||
if (!cc->cpu) {
|
||||
ti->error = "Cannot allocate per cpu state";
|
||||
goto bad_mem;
|
||||
}
|
||||
|
||||
/*
|
||||
* For compatibility with the original dm-crypt mapping format, if
|
||||
* only the cipher name is supplied, use cbc-plain.
|
||||
|
||||
Reference in New Issue
Block a user