Diffstat (limited to 'crypto/mcryptd.c')
 crypto/mcryptd.c | 23 ++++++++++-------------
 1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index b4f3930266b1..f620fe09d20a 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -80,6 +80,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue,
 		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
+		spin_lock_init(&cpu_queue->q_lock);
 	}
 	return 0;
 }
@@ -103,15 +104,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
 	int cpu, err;
 	struct mcryptd_cpu_queue *cpu_queue;
 
-	cpu = get_cpu();
-	cpu_queue = this_cpu_ptr(queue->cpu_queue);
-	rctx->tag.cpu = cpu;
+	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
+	spin_lock(&cpu_queue->q_lock);
+	cpu = smp_processor_id();
+	rctx->tag.cpu = smp_processor_id();
 
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
 		 cpu, cpu_queue, request);
+	spin_unlock(&cpu_queue->q_lock);
 	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
-	put_cpu();
 
 	return err;
 }
@@ -164,16 +166,11 @@ static void mcryptd_queue_worker(struct work_struct *work)
 	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
 	i = 0;
 	while (i < MCRYPTD_BATCH || single_task_running()) {
-		/*
-		 * preempt_disable/enable is used to prevent
-		 * being preempted by mcryptd_enqueue_request()
-		 */
-		local_bh_disable();
-		preempt_disable();
+
+		spin_lock_bh(&cpu_queue->q_lock);
 		backlog = crypto_get_backlog(&cpu_queue->queue);
 		req = crypto_dequeue_request(&cpu_queue->queue);
-		preempt_enable();
-		local_bh_enable();
+		spin_unlock_bh(&cpu_queue->q_lock);
 
 		if (!req) {
 			mcryptd_opportunistic_flush();
@@ -188,7 +185,7 @@ static void mcryptd_queue_worker(struct work_struct *work)
 		++i;
 	}
 	if (cpu_queue->queue.qlen)
-		queue_work(kcrypto_wq, &cpu_queue->work);
+		queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
 }
 
 void mcryptd_flusher(struct work_struct *__work)
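
The change above replaces the implicit serialization of the per-CPU request queue (get_cpu()/put_cpu() in mcryptd_enqueue_request(), preempt_disable()/local_bh_disable() in mcryptd_queue_worker()) with an explicit spinlock, cpu_queue->q_lock, and requeues leftover work on the current CPU via queue_work_on(smp_processor_id(), ...). As a rough illustration of the resulting producer/worker pattern, here is a minimal user-space sketch using a pthread mutex; demo_queue, demo_enqueue() and demo_worker() are hypothetical names for this sketch only, and the mutex stands in for both spin_lock() and spin_lock_bh() (the softirq-disabling side of the _bh variant has no direct user-space analogue).

/*
 * Minimal user-space sketch of the locking pattern introduced above,
 * using a pthread mutex in place of cpu_queue->q_lock.  All names
 * (demo_queue, demo_enqueue, demo_worker) are hypothetical; this is
 * not mcryptd code.
 */
#include <pthread.h>
#include <stdio.h>

struct demo_queue {
	pthread_mutex_t q_lock;		/* stands in for cpu_queue->q_lock */
	int items[16];
	int head, tail;
};

static struct demo_queue q = { .q_lock = PTHREAD_MUTEX_INITIALIZER };

/* Producer: like the patched mcryptd_enqueue_request(), enqueue under
 * the queue lock, drop the lock, then kick the worker. */
static void demo_enqueue(int item)
{
	pthread_mutex_lock(&q.q_lock);
	q.items[q.tail++ % 16] = item;
	pthread_mutex_unlock(&q.q_lock);
	/* in the kernel: queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); */
}

/* Worker: like the patched mcryptd_queue_worker(), dequeue one item
 * under the same lock, then process it with the lock released. */
static void *demo_worker(void *unused)
{
	(void)unused;
	for (;;) {
		int item = -1;

		pthread_mutex_lock(&q.q_lock);
		if (q.head != q.tail)
			item = q.items[q.head++ % 16];
		pthread_mutex_unlock(&q.q_lock);

		if (item < 0)
			break;			/* queue drained */
		printf("processed %d\n", item);
	}
	return NULL;
}

int main(void)
{
	pthread_t worker;

	for (int i = 0; i < 4; i++)
		demo_enqueue(i);
	pthread_create(&worker, NULL, demo_worker, NULL);
	pthread_join(worker, NULL);
	return 0;
}

Built with gcc -pthread, the sketch prints "processed 0" through "processed 3". The design point it mirrors: once every reader and writer of the queue takes the same lock, the enqueue path no longer has to pin itself to a CPU with get_cpu() to stay safe against the worker; the CPU id is only used to choose which CPU's worker to kick.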