Diffstat (limited to 'crypto/heh.c')
-rw-r--r-- | crypto/heh.c | 1033 |
1 files changed, 1033 insertions, 0 deletions
diff --git a/crypto/heh.c b/crypto/heh.c
new file mode 100644
index 000000000000..10c00aaf797e
--- /dev/null
+++ b/crypto/heh.c
@@ -0,0 +1,1033 @@
1 | /* | ||
2 | * HEH: Hash-Encrypt-Hash mode | ||
3 | * | ||
4 | * Copyright (c) 2016 Google Inc. | ||
5 | * | ||
6 | * Authors: | ||
7 | * Alex Cope <alexcope@google.com> | ||
8 | * Eric Biggers <ebiggers@google.com> | ||
9 | */ | ||
10 | |||
11 | /* | ||
12 | * Hash-Encrypt-Hash (HEH) is a proposed block cipher mode of operation which | ||
13 | * extends the strong pseudo-random permutation (SPRP) property of block ciphers | ||
14 | * (e.g. AES) to arbitrary length input strings. It uses two keyed invertible | ||
15 | * hash functions with a layer of ECB encryption applied in-between. The | ||
16 | * algorithm is specified by the following Internet Draft: | ||
17 | * | ||
18 | * https://tools.ietf.org/html/draft-cope-heh-01 | ||
19 | * | ||
20 | * Although HEH can be used as either a regular symmetric cipher or as an AEAD, | ||
21 | * currently this module only provides it as a symmetric cipher. Additionally, | ||
22 | * only 16-byte nonces are supported. | ||
23 | */ | ||
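/*
 * A minimal usage sketch (error handling omitted; 'key', 'nonce', 'src_sg',
 * 'dst_sg' and 'nbytes' are placeholders the caller must provide).  HEH is
 * driven through the ablkcipher API, with the 16-byte nonce passed as the IV:
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("heh(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, 16);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, nonce);
 *	crypto_ablkcipher_encrypt(req);
 */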
24 | |||
25 | #include <crypto/gf128mul.h> | ||
26 | #include <crypto/internal/hash.h> | ||
27 | #include <crypto/internal/skcipher.h> | ||
28 | #include <crypto/scatterwalk.h> | ||
29 | #include <crypto/skcipher.h> | ||
30 | #include "internal.h" | ||
31 | |||
32 | /* | ||
33 | * The block size is the size of GF(2^128) elements and also the required block | ||
34 | * size of the underlying block cipher. | ||
35 | */ | ||
36 | #define HEH_BLOCK_SIZE 16 | ||
37 | |||
38 | struct heh_instance_ctx { | ||
39 | struct crypto_shash_spawn cmac; | ||
40 | struct crypto_shash_spawn poly_hash; | ||
41 | struct crypto_skcipher_spawn ecb; | ||
42 | }; | ||
43 | |||
44 | struct heh_tfm_ctx { | ||
45 | struct crypto_shash *cmac; | ||
46 | struct crypto_shash *poly_hash; /* keyed with tau_key */ | ||
47 | struct crypto_ablkcipher *ecb; | ||
48 | }; | ||
49 | |||
50 | struct heh_cmac_data { | ||
51 | u8 nonce[HEH_BLOCK_SIZE]; | ||
52 | __le32 nonce_length; | ||
53 | __le32 aad_length; | ||
54 | __le32 message_length; | ||
55 | __le32 padding; | ||
56 | }; | ||
57 | |||
58 | struct heh_req_ctx { /* aligned to alignmask */ | ||
59 | be128 beta1_key; | ||
60 | be128 beta2_key; | ||
61 | union { | ||
62 | struct { | ||
63 | struct heh_cmac_data data; | ||
64 | struct shash_desc desc; | ||
65 | /* + crypto_shash_descsize(cmac) */ | ||
66 | } cmac; | ||
67 | struct { | ||
68 | struct shash_desc desc; | ||
69 | /* + crypto_shash_descsize(poly_hash) */ | ||
70 | } poly_hash; | ||
71 | struct { | ||
72 | u8 keystream[HEH_BLOCK_SIZE]; | ||
73 | u8 tmp[HEH_BLOCK_SIZE]; | ||
74 | struct scatterlist tmp_sgl[2]; | ||
75 | struct ablkcipher_request req; | ||
76 | /* + crypto_ablkcipher_reqsize(ecb) */ | ||
77 | } ecb; | ||
78 | } u; | ||
79 | }; | ||
80 | |||
81 | /* | ||
82 | * Get the offset in bytes to the last full block, or equivalently the length of | ||
83 | * all full blocks excluding the last | ||
84 | */ | ||
85 | static inline unsigned int get_tail_offset(unsigned int len) | ||
86 | { | ||
87 | len -= len % HEH_BLOCK_SIZE; | ||
88 | return len - HEH_BLOCK_SIZE; | ||
89 | } | ||
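/*
 * For example, get_tail_offset(40) = 16: a 40-byte message has full blocks at
 * offsets 0 and 16 plus an 8-byte partial block, so the last full block starts
 * at offset 16.  For a single-block 16-byte message it returns 0.
 */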
90 | |||
91 | static inline struct heh_req_ctx *heh_req_ctx(struct ablkcipher_request *req) | ||
92 | { | ||
93 | unsigned int alignmask = crypto_ablkcipher_alignmask( | ||
94 | crypto_ablkcipher_reqtfm(req)); | ||
95 | |||
96 | return (void *)PTR_ALIGN((u8 *)ablkcipher_request_ctx(req), | ||
97 | alignmask + 1); | ||
98 | } | ||
99 | |||
100 | static inline void async_done(struct crypto_async_request *areq, int err, | ||
101 | int (*next_step)(struct ablkcipher_request *, | ||
102 | u32)) | ||
103 | { | ||
104 | struct ablkcipher_request *req = areq->data; | ||
105 | |||
106 | if (err) | ||
107 | goto out; | ||
108 | |||
109 | err = next_step(req, req->base.flags & ~CRYPTO_TFM_REQ_MAY_SLEEP); | ||
110 | if (err == -EINPROGRESS || | ||
111 | (err == -EBUSY && (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) | ||
112 | return; | ||
113 | out: | ||
114 | ablkcipher_request_complete(req, err); | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * Generate the per-message "beta" keys used by the hashing layers of HEH. The | ||
119 | * first beta key is the CMAC of the nonce, the additional authenticated data | ||
120 | * (AAD), and the lengths in bytes of the nonce, AAD, and message. The nonce | ||
121 | * and AAD are each zero-padded to the next 16-byte block boundary, and the | ||
122 | * lengths are serialized as 4-byte little endian integers and zero-padded to | ||
123 | * the next 16-byte block boundary. | ||
124 | * The second beta key is the first one interpreted as an element in GF(2^128) | ||
125 | * and multiplied by x. | ||
126 | * | ||
127 | * Note that because the nonce and AAD may, in general, be variable-length, the | ||
128 | * key generation must be done by a pseudo-random function (PRF) on | ||
129 | * variable-length inputs. CBC-MAC does not satisfy this, as it is only a PRF | ||
130 | * on fixed-length inputs. CMAC remedies this flaw. Including the lengths of | ||
131 | * the nonce, AAD, and message is also critical to avoid collisions. | ||
132 | * | ||
133 | * That being said, this implementation does not yet operate as an AEAD and | ||
134 | * therefore there is never any AAD, nor are variable-length nonces supported. | ||
135 | */ | ||
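/*
 * Concretely, since only 16-byte nonces and no AAD are supported here, the
 * CMAC input is always the 32 bytes laid out by struct heh_cmac_data: the
 * 16-byte nonce, then le32(16), le32(0), le32(req->nbytes) and le32(0).
 */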
136 | static int generate_betas(struct ablkcipher_request *req, | ||
137 | be128 *beta1_key, be128 *beta2_key) | ||
138 | { | ||
139 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
140 | struct heh_tfm_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
141 | struct heh_req_ctx *rctx = heh_req_ctx(req); | ||
142 | struct heh_cmac_data *data = &rctx->u.cmac.data; | ||
143 | struct shash_desc *desc = &rctx->u.cmac.desc; | ||
144 | int err; | ||
145 | |||
146 | BUILD_BUG_ON(sizeof(*data) != 2 * HEH_BLOCK_SIZE); | ||
147 | memcpy(data->nonce, req->info, HEH_BLOCK_SIZE); | ||
148 | data->nonce_length = cpu_to_le32(HEH_BLOCK_SIZE); | ||
149 | data->aad_length = cpu_to_le32(0); | ||
150 | data->message_length = cpu_to_le32(req->nbytes); | ||
151 | data->padding = cpu_to_le32(0); | ||
152 | |||
153 | desc->tfm = ctx->cmac; | ||
154 | desc->flags = req->base.flags; | ||
155 | |||
156 | err = crypto_shash_digest(desc, (const u8 *)data, sizeof(*data), | ||
157 | (u8 *)beta1_key); | ||
158 | if (err) | ||
159 | return err; | ||
160 | |||
161 | gf128mul_x_ble(beta2_key, beta1_key); | ||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | /*****************************************************************************/ | ||
166 | |||
167 | /* | ||
168 | * This is the generic version of poly_hash. It does the GF(2^128) | ||
169 | * multiplication by 'tau_key' using a precomputed table, without using any | ||
170 | * special CPU instructions. On some platforms, an accelerated version (with | ||
171 | * higher cra_priority) may be used instead. | ||
172 | */ | ||
173 | |||
174 | struct poly_hash_tfm_ctx { | ||
175 | struct gf128mul_4k *tau_key; | ||
176 | }; | ||
177 | |||
178 | struct poly_hash_desc_ctx { | ||
179 | be128 digest; | ||
180 | unsigned int count; | ||
181 | }; | ||
182 | |||
183 | static int poly_hash_setkey(struct crypto_shash *tfm, | ||
184 | const u8 *key, unsigned int keylen) | ||
185 | { | ||
186 | struct poly_hash_tfm_ctx *tctx = crypto_shash_ctx(tfm); | ||
187 | be128 key128; | ||
188 | |||
189 | if (keylen != HEH_BLOCK_SIZE) { | ||
190 | crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
191 | return -EINVAL; | ||
192 | } | ||
193 | |||
194 | if (tctx->tau_key) | ||
195 | gf128mul_free_4k(tctx->tau_key); | ||
196 | memcpy(&key128, key, HEH_BLOCK_SIZE); | ||
197 | tctx->tau_key = gf128mul_init_4k_ble(&key128); | ||
198 | if (!tctx->tau_key) | ||
199 | return -ENOMEM; | ||
200 | return 0; | ||
201 | } | ||
202 | |||
203 | static int poly_hash_init(struct shash_desc *desc) | ||
204 | { | ||
205 | struct poly_hash_desc_ctx *ctx = shash_desc_ctx(desc); | ||
206 | |||
207 | ctx->digest = (be128) { 0 }; | ||
208 | ctx->count = 0; | ||
209 | return 0; | ||
210 | } | ||
211 | |||
212 | static int poly_hash_update(struct shash_desc *desc, const u8 *src, | ||
213 | unsigned int len) | ||
214 | { | ||
215 | struct poly_hash_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); | ||
216 | struct poly_hash_desc_ctx *ctx = shash_desc_ctx(desc); | ||
217 | unsigned int partial = ctx->count % HEH_BLOCK_SIZE; | ||
218 | u8 *dst = (u8 *)&ctx->digest + partial; | ||
219 | |||
220 | ctx->count += len; | ||
221 | |||
222 | /* Finishing at least one block? */ | ||
223 | if (partial + len >= HEH_BLOCK_SIZE) { | ||
224 | |||
225 | if (partial) { | ||
226 | /* Finish the pending block. */ | ||
227 | unsigned int n = HEH_BLOCK_SIZE - partial; | ||
228 | |||
229 | len -= n; | ||
230 | do { | ||
231 | *dst++ ^= *src++; | ||
232 | } while (--n); | ||
233 | |||
234 | gf128mul_4k_ble(&ctx->digest, tctx->tau_key); | ||
235 | } | ||
236 | |||
237 | /* Process zero or more full blocks. */ | ||
238 | while (len >= HEH_BLOCK_SIZE) { | ||
239 | be128 coeff; | ||
240 | |||
241 | memcpy(&coeff, src, HEH_BLOCK_SIZE); | ||
242 | be128_xor(&ctx->digest, &ctx->digest, &coeff); | ||
243 | src += HEH_BLOCK_SIZE; | ||
244 | len -= HEH_BLOCK_SIZE; | ||
245 | gf128mul_4k_ble(&ctx->digest, tctx->tau_key); | ||
246 | } | ||
247 | dst = (u8 *)&ctx->digest; | ||
248 | } | ||
249 | |||
250 | /* Continue adding the next block to 'digest'. */ | ||
251 | while (len--) | ||
252 | *dst++ ^= *src++; | ||
253 | return 0; | ||
254 | } | ||
255 | |||
256 | static int poly_hash_final(struct shash_desc *desc, u8 *out) | ||
257 | { | ||
258 | struct poly_hash_desc_ctx *ctx = shash_desc_ctx(desc); | ||
259 | |||
260 | /* Finish the last block if needed. */ | ||
261 | if (ctx->count % HEH_BLOCK_SIZE) { | ||
262 | struct poly_hash_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); | ||
263 | |||
264 | gf128mul_4k_ble(&ctx->digest, tctx->tau_key); | ||
265 | } | ||
266 | |||
267 | memcpy(out, &ctx->digest, HEH_BLOCK_SIZE); | ||
268 | return 0; | ||
269 | } | ||
270 | |||
271 | static void poly_hash_exit(struct crypto_tfm *tfm) | ||
272 | { | ||
273 | struct poly_hash_tfm_ctx *tctx = crypto_tfm_ctx(tfm); | ||
274 | |||
275 | gf128mul_free_4k(tctx->tau_key); | ||
276 | } | ||
277 | |||
278 | static struct shash_alg poly_hash_alg = { | ||
279 | .digestsize = HEH_BLOCK_SIZE, | ||
280 | .init = poly_hash_init, | ||
281 | .update = poly_hash_update, | ||
282 | .final = poly_hash_final, | ||
283 | .setkey = poly_hash_setkey, | ||
284 | .descsize = sizeof(struct poly_hash_desc_ctx), | ||
285 | .base = { | ||
286 | .cra_name = "poly_hash", | ||
287 | .cra_driver_name = "poly_hash-generic", | ||
288 | .cra_priority = 100, | ||
289 | .cra_ctxsize = sizeof(struct poly_hash_tfm_ctx), | ||
290 | .cra_exit = poly_hash_exit, | ||
291 | .cra_module = THIS_MODULE, | ||
292 | }, | ||
293 | }; | ||
294 | |||
295 | /*****************************************************************************/ | ||
296 | |||
297 | /* | ||
298 | * Split the message into 16-byte blocks, padding out the last block, and use | ||
299 | * the blocks as coefficients in the evaluation of a polynomial over GF(2^128) | ||
300 | * at the secret point 'tau_key'. For ease of implementing the higher-level | ||
301 | * heh_hash_inv() function, the constant and degree-1 coefficients are swapped | ||
302 | * if there is a partial block. | ||
303 | * | ||
304 | * Mathematically, compute: | ||
305 | * if (no partial block) | ||
306 | * k^{N-1} * m_0 + ... + k * m_{N-2} + m_{N-1} | ||
307 | * else if (partial block) | ||
308 | * k^N * m_0 + ... + k^2 * m_{N-2} + k * m_N + m_{N-1} | ||
309 | * | ||
310 | * where: | ||
311 | * k is tau_key | ||
312 | * N is the number of full blocks in the message | ||
313 | * m_i is the i-th full block in the message for i = 0 to N-1 inclusive | ||
314 | * m_N is the partial block of the message zero-padded up to 16 bytes | ||
315 | * | ||
316 | * Note that most of this is now separated out into its own keyed hash | ||
317 | * algorithm, to allow optimized implementations. However, we still handle the | ||
318 | * swapping of the last two coefficients here in the HEH template because this | ||
319 | * simplifies the poly_hash algorithms: they don't have to buffer an extra | ||
320 | * block, don't have to duplicate as much code, and are more similar to GHASH. | ||
321 | */ | ||
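/*
 * As a worked instance of the formulas above: for a message with two full
 * blocks m_0, m_1 and a partial block m_2, this computes
 * k^2 * m_0 + k * m_2 + m_1 (with m_2 zero-padded); for exactly two full
 * blocks it computes k * m_0 + m_1.
 */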
322 | static int poly_hash(struct ablkcipher_request *req, struct scatterlist *sgl, | ||
323 | be128 *hash) | ||
324 | { | ||
325 | struct heh_req_ctx *rctx = heh_req_ctx(req); | ||
326 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
327 | struct heh_tfm_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
328 | struct shash_desc *desc = &rctx->u.poly_hash.desc; | ||
329 | unsigned int tail_offset = get_tail_offset(req->nbytes); | ||
330 | unsigned int tail_len = req->nbytes - tail_offset; | ||
331 | be128 tail[2]; | ||
332 | unsigned int i, n; | ||
333 | struct sg_mapping_iter miter; | ||
334 | int err; | ||
335 | |||
336 | desc->tfm = ctx->poly_hash; | ||
337 | desc->flags = req->base.flags; | ||
338 | |||
339 | /* Handle all full blocks except the last */ | ||
340 | err = crypto_shash_init(desc); | ||
341 | sg_miter_start(&miter, sgl, sg_nents(sgl), | ||
342 | SG_MITER_FROM_SG | SG_MITER_ATOMIC); | ||
343 | for (i = 0; i < tail_offset && !err; i += n) { | ||
344 | sg_miter_next(&miter); | ||
345 | n = min_t(unsigned int, miter.length, tail_offset - i); | ||
346 | err = crypto_shash_update(desc, miter.addr, n); | ||
347 | } | ||
348 | sg_miter_stop(&miter); | ||
349 | if (err) | ||
350 | return err; | ||
351 | |||
352 | /* Handle the last full block and the partial block */ | ||
353 | scatterwalk_map_and_copy(tail, sgl, tail_offset, tail_len, 0); | ||
354 | |||
355 | if (tail_len != HEH_BLOCK_SIZE) { | ||
356 | /* handle the partial block */ | ||
357 | memset((u8 *)tail + tail_len, 0, sizeof(tail) - tail_len); | ||
358 | err = crypto_shash_update(desc, (u8 *)&tail[1], HEH_BLOCK_SIZE); | ||
359 | if (err) | ||
360 | return err; | ||
361 | } | ||
362 | err = crypto_shash_final(desc, (u8 *)hash); | ||
363 | if (err) | ||
364 | return err; | ||
365 | be128_xor(hash, hash, &tail[0]); | ||
366 | return 0; | ||
367 | } | ||
368 | |||
369 | /* | ||
370 | * Transform all full blocks except the last. | ||
371 | * This is used by both the hash and inverse hash phases. | ||
372 | */ | ||
373 | static int heh_tfm_blocks(struct ablkcipher_request *req, | ||
374 | struct scatterlist *src_sgl, | ||
375 | struct scatterlist *dst_sgl, unsigned int len, | ||
376 | const be128 *hash, const be128 *beta_key) | ||
377 | { | ||
378 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
379 | struct blkcipher_desc desc = { .flags = req->base.flags }; | ||
380 | struct blkcipher_walk walk; | ||
381 | be128 e = *beta_key; | ||
382 | int err; | ||
383 | unsigned int nbytes; | ||
384 | |||
385 | blkcipher_walk_init(&walk, dst_sgl, src_sgl, len); | ||
386 | |||
387 | err = blkcipher_ablkcipher_walk_virt(&desc, &walk, tfm); | ||
388 | |||
389 | while ((nbytes = walk.nbytes)) { | ||
390 | const be128 *src = (be128 *)walk.src.virt.addr; | ||
391 | be128 *dst = (be128 *)walk.dst.virt.addr; | ||
392 | |||
393 | do { | ||
394 | gf128mul_x_ble(&e, &e); | ||
395 | be128_xor(dst, src, hash); | ||
396 | be128_xor(dst, dst, &e); | ||
397 | src++; | ||
398 | dst++; | ||
399 | } while ((nbytes -= HEH_BLOCK_SIZE) >= HEH_BLOCK_SIZE); | ||
400 | err = blkcipher_walk_done(&desc, &walk, nbytes); | ||
401 | } | ||
402 | return err; | ||
403 | } | ||
404 | |||
405 | /* | ||
406 | * The hash phase of HEH. Given a message, compute: | ||
407 | * | ||
408 | * (m_0 + H, ..., m_{N-2} + H, H, m_N) + (xb, x^2b, ..., x^{N-1}b, b, 0) | ||
409 | * | ||
410 | * where: | ||
411 | * N is the number of full blocks in the message | ||
412 | * m_i is the i-th full block in the message for i = 0 to N-1 inclusive | ||
413 | * m_N is the unpadded partial block, possibly empty | ||
414 | * H is the poly_hash() of the message, keyed by tau_key | ||
415 | * b is beta_key | ||
416 | * x is the element x in our representation of GF(2^128) | ||
417 | * | ||
418 | * Note that the partial block remains unchanged, but it does affect the result | ||
419 | * of poly_hash() and therefore the transformation of all the full blocks. | ||
420 | */ | ||
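/*
 * For example, with two full blocks and a partial block, the hash phase
 * outputs (m_0 + H + x*b, H + b, m_2), where '+' is XOR of GF(2^128)
 * elements and the partial block m_2 passes through unchanged.
 */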
421 | static int heh_hash(struct ablkcipher_request *req, const be128 *beta_key) | ||
422 | { | ||
423 | be128 hash; | ||
424 | unsigned int tail_offset = get_tail_offset(req->nbytes); | ||
425 | unsigned int partial_len = req->nbytes % HEH_BLOCK_SIZE; | ||
426 | int err; | ||
427 | |||
428 | /* poly_hash() the full message including the partial block */ | ||
429 | err = poly_hash(req, req->src, &hash); | ||
430 | if (err) | ||
431 | return err; | ||
432 | |||
433 | /* Transform all full blocks except the last */ | ||
434 | err = heh_tfm_blocks(req, req->src, req->dst, tail_offset, &hash, | ||
435 | beta_key); | ||
436 | if (err) | ||
437 | return err; | ||
438 | |||
439 | /* Set the last full block to hash XOR beta_key */ | ||
440 | be128_xor(&hash, &hash, beta_key); | ||
441 | scatterwalk_map_and_copy(&hash, req->dst, tail_offset, HEH_BLOCK_SIZE, | ||
442 | 1); | ||
443 | |||
444 | /* Copy the partial block if needed */ | ||
445 | if (partial_len != 0 && req->src != req->dst) { | ||
446 | unsigned int offs = tail_offset + HEH_BLOCK_SIZE; | ||
447 | |||
448 | scatterwalk_map_and_copy(&hash, req->src, offs, partial_len, 0); | ||
449 | scatterwalk_map_and_copy(&hash, req->dst, offs, partial_len, 1); | ||
450 | } | ||
451 | return 0; | ||
452 | } | ||
453 | |||
454 | /* | ||
455 | * The inverse hash phase of HEH. This undoes the result of heh_hash(). | ||
456 | */ | ||
457 | static int heh_hash_inv(struct ablkcipher_request *req, const be128 *beta_key) | ||
458 | { | ||
459 | be128 hash; | ||
460 | be128 tmp; | ||
461 | struct scatterlist tmp_sgl[2]; | ||
462 | struct scatterlist *tail_sgl; | ||
463 | unsigned int tail_offset = get_tail_offset(req->nbytes); | ||
464 | struct scatterlist *sgl = req->dst; | ||
465 | int err; | ||
466 | |||
467 | /* | ||
468 | * The last full block was computed as hash XOR beta_key, so XOR it with | ||
469 | * beta_key to recover hash. | ||
470 | */ | ||
471 | tail_sgl = scatterwalk_ffwd(tmp_sgl, sgl, tail_offset); | ||
472 | scatterwalk_map_and_copy(&hash, tail_sgl, 0, HEH_BLOCK_SIZE, 0); | ||
473 | be128_xor(&hash, &hash, beta_key); | ||
474 | |||
475 | /* Transform all full blocks except the last */ | ||
476 | err = heh_tfm_blocks(req, sgl, sgl, tail_offset, &hash, beta_key); | ||
477 | if (err) | ||
478 | return err; | ||
479 | |||
480 | /* | ||
481 | * Recover the last full block. We know 'hash', i.e. the poly_hash() of | ||
482 | * the original message. The last full block was the constant term | ||
483 | * of the polynomial. To recover the last full block, temporarily zero | ||
484 | * it, compute the poly_hash(), and take the difference from 'hash'. | ||
485 | */ | ||
486 | memset(&tmp, 0, sizeof(tmp)); | ||
487 | scatterwalk_map_and_copy(&tmp, tail_sgl, 0, HEH_BLOCK_SIZE, 1); | ||
488 | err = poly_hash(req, sgl, &tmp); | ||
489 | if (err) | ||
490 | return err; | ||
491 | be128_xor(&tmp, &tmp, &hash); | ||
492 | scatterwalk_map_and_copy(&tmp, tail_sgl, 0, HEH_BLOCK_SIZE, 1); | ||
493 | return 0; | ||
494 | } | ||
495 | |||
496 | static int heh_hash_inv_step(struct ablkcipher_request *req, u32 flags) | ||
497 | { | ||
498 | struct heh_req_ctx *rctx = heh_req_ctx(req); | ||
499 | |||
500 | return heh_hash_inv(req, &rctx->beta2_key); | ||
501 | } | ||
502 | |||
503 | static int heh_ecb_step_3(struct ablkcipher_request *req, u32 flags) | ||
504 | { | ||
505 | struct heh_req_ctx *rctx = heh_req_ctx(req); | ||
506 | u8 partial_block[HEH_BLOCK_SIZE] __aligned(__alignof__(u32)); | ||
507 | unsigned int tail_offset = get_tail_offset(req->nbytes); | ||
508 | unsigned int partial_offset = tail_offset + HEH_BLOCK_SIZE; | ||
509 | unsigned int partial_len = req->nbytes - partial_offset; | ||
510 | |||
511 | /* | ||
512 | * Extract the pad in req->dst at tail_offset, and xor the partial block | ||
513 | * with it to create the encrypted partial block | ||
514 | */ | ||
515 | scatterwalk_map_and_copy(rctx->u.ecb.keystream, req->dst, tail_offset, | ||
516 | HEH_BLOCK_SIZE, 0); | ||
517 | scatterwalk_map_and_copy(partial_block, req->dst, partial_offset, | ||
518 | partial_len, 0); | ||
519 | crypto_xor(partial_block, rctx->u.ecb.keystream, partial_len); | ||
520 | |||
521 | /* | ||
522 | * Store the encrypted final block and partial block back in req->dst | ||
523 | */ | ||
524 | scatterwalk_map_and_copy(&rctx->u.ecb.tmp, req->dst, tail_offset, | ||
525 | HEH_BLOCK_SIZE, 1); | ||
526 | scatterwalk_map_and_copy(partial_block, req->dst, partial_offset, | ||
527 | partial_len, 1); | ||
528 | |||
529 | return heh_hash_inv_step(req, flags); | ||
530 | } | ||
531 | |||
532 | static void heh_ecb_step_2_done(struct crypto_async_request *areq, int err) | ||
533 | { | ||
534 | return async_done(areq, err, heh_ecb_step_3); | ||
535 | } | ||
536 | |||
537 | static int heh_ecb_step_2(struct ablkcipher_request *req, u32 flags) | ||
538 | { | ||
539 | struct heh_req_ctx *rctx = heh_req_ctx(req); | ||
540 | unsigned int partial_len = req->nbytes % HEH_BLOCK_SIZE; | ||
541 | struct scatterlist *tmp_sgl; | ||
542 | int err; | ||
543 | unsigned int tail_offset = get_tail_offset(req->nbytes); | ||
544 | |||
545 | if (partial_len == 0) | ||
546 | return heh_hash_inv_step(req, flags); | ||
547 | |||
548 | /* | ||
549 | * Extract the final full block, store it in tmp, and then xor that with | ||
550 | * the value saved in u.ecb.keystream | ||
551 | */ | ||
552 | scatterwalk_map_and_copy(rctx->u.ecb.tmp, req->dst, tail_offset, | ||
553 | HEH_BLOCK_SIZE, 0); | ||
554 | crypto_xor(rctx->u.ecb.keystream, rctx->u.ecb.tmp, HEH_BLOCK_SIZE); | ||
555 | |||
556 | /* | ||
557 | * Encrypt the value in rctx->u.ecb.keystream to create the pad for the | ||
558 | * partial block. | ||
559 | * We cannot encrypt stack buffers, so re-use req->dst to do this | ||
560 | * encryption to avoid a malloc. The value at tail_offset is stored in | ||
561 | * tmp, and will be restored later. | ||
562 | */ | ||
563 | scatterwalk_map_and_copy(rctx->u.ecb.keystream, req->dst, tail_offset, | ||
564 | HEH_BLOCK_SIZE, 1); | ||
565 | tmp_sgl = scatterwalk_ffwd(rctx->u.ecb.tmp_sgl, req->dst, tail_offset); | ||
566 | ablkcipher_request_set_callback(&rctx->u.ecb.req, flags, | ||
567 | heh_ecb_step_2_done, req); | ||
568 | ablkcipher_request_set_crypt(&rctx->u.ecb.req, tmp_sgl, tmp_sgl, | ||
569 | HEH_BLOCK_SIZE, NULL); | ||
570 | err = crypto_ablkcipher_encrypt(&rctx->u.ecb.req); | ||
571 | if (err) | ||
572 | return err; | ||
573 | return heh_ecb_step_3(req, flags); | ||
574 | } | ||
575 | |||
576 | static void heh_ecb_full_done(struct crypto_async_request *areq, int err) | ||
577 | { | ||
578 | return async_done(areq, err, heh_ecb_step_2); | ||
579 | } | ||
580 | |||
581 | /* | ||
582 | * The encrypt phase of HEH. This uses ECB encryption, with special handling | ||
583 | * for the partial block at the end if any. The source data is already in | ||
584 | * req->dst, so the encryption happens in-place. | ||
585 | * | ||
586 | * After the encrypt phase we continue on to the inverse hash phase. The | ||
587 | * function calls are chained to support asynchronous ECB algorithms. | ||
588 | */ | ||
589 | static int heh_ecb(struct ablkcipher_request *req, bool decrypt) | ||
590 | { | ||
591 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
592 | struct heh_tfm_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
593 | struct heh_req_ctx *rctx = heh_req_ctx(req); | ||
594 | struct ablkcipher_request *ecb_req = &rctx->u.ecb.req; | ||
595 | unsigned int tail_offset = get_tail_offset(req->nbytes); | ||
596 | unsigned int full_len = tail_offset + HEH_BLOCK_SIZE; | ||
597 | int err; | ||
598 | |||
599 | /* | ||
600 | * Save the last full block before it is encrypted/decrypted. This will | ||
601 | * be used later to encrypt/decrypt the partial block | ||
602 | */ | ||
603 | scatterwalk_map_and_copy(rctx->u.ecb.keystream, req->dst, tail_offset, | ||
604 | HEH_BLOCK_SIZE, 0); | ||
605 | |||
606 | /* Encrypt/decrypt all full blocks */ | ||
607 | ablkcipher_request_set_tfm(ecb_req, ctx->ecb); | ||
608 | ablkcipher_request_set_callback(ecb_req, req->base.flags, | ||
609 | heh_ecb_full_done, req); | ||
610 | ablkcipher_request_set_crypt(ecb_req, req->dst, req->dst, full_len, | ||
611 | NULL); | ||
612 | if (decrypt) | ||
613 | err = crypto_ablkcipher_decrypt(ecb_req); | ||
614 | else | ||
615 | err = crypto_ablkcipher_encrypt(ecb_req); | ||
616 | if (err) | ||
617 | return err; | ||
618 | |||
619 | return heh_ecb_step_2(req, req->base.flags); | ||
620 | } | ||
621 | |||
622 | static int heh_crypt(struct ablkcipher_request *req, bool decrypt) | ||
623 | { | ||
624 | struct heh_req_ctx *rctx = heh_req_ctx(req); | ||
625 | int err; | ||
626 | |||
627 | /* Inputs must be at least one full block */ | ||
628 | if (req->nbytes < HEH_BLOCK_SIZE) | ||
629 | return -EINVAL; | ||
630 | |||
631 | err = generate_betas(req, &rctx->beta1_key, &rctx->beta2_key); | ||
632 | if (err) | ||
633 | return err; | ||
634 | |||
635 | if (decrypt) | ||
636 | swap(rctx->beta1_key, rctx->beta2_key); | ||
637 | |||
638 | err = heh_hash(req, &rctx->beta1_key); | ||
639 | if (err) | ||
640 | return err; | ||
641 | |||
642 | return heh_ecb(req, decrypt); | ||
643 | } | ||
644 | |||
645 | static int heh_encrypt(struct ablkcipher_request *req) | ||
646 | { | ||
647 | return heh_crypt(req, false); | ||
648 | } | ||
649 | |||
650 | static int heh_decrypt(struct ablkcipher_request *req) | ||
651 | { | ||
652 | return heh_crypt(req, true); | ||
653 | } | ||
654 | |||
655 | static int heh_setkey(struct crypto_ablkcipher *parent, const u8 *key, | ||
656 | unsigned int keylen) | ||
657 | { | ||
658 | struct heh_tfm_ctx *ctx = crypto_ablkcipher_ctx(parent); | ||
659 | struct crypto_shash *cmac = ctx->cmac; | ||
660 | struct crypto_ablkcipher *ecb = ctx->ecb; | ||
661 | SHASH_DESC_ON_STACK(desc, cmac); | ||
662 | u8 *derived_keys; | ||
663 | u8 digest[HEH_BLOCK_SIZE]; | ||
664 | unsigned int i; | ||
665 | int err; | ||
666 | |||
667 | /* set prf_key = key */ | ||
668 | crypto_shash_clear_flags(cmac, CRYPTO_TFM_REQ_MASK); | ||
669 | crypto_shash_set_flags(cmac, crypto_ablkcipher_get_flags(parent) & | ||
670 | CRYPTO_TFM_REQ_MASK); | ||
671 | err = crypto_shash_setkey(cmac, key, keylen); | ||
672 | crypto_ablkcipher_set_flags(parent, crypto_shash_get_flags(cmac) & | ||
673 | CRYPTO_TFM_RES_MASK); | ||
674 | if (err) | ||
675 | return err; | ||
676 | |||
677 | /* | ||
678 | * Generate tau_key and ecb_key as follows: | ||
679 | * tau_key = cmac(prf_key, 0x00...01) | ||
680 | * ecb_key = cmac(prf_key, 0x00...02) || cmac(prf_key, 0x00...03) || ... | ||
681 | * truncated to keylen bytes | ||
682 | */ | ||
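	/*
	 * E.g. for a 32-byte key, derived_keys below is 48 bytes: block 0
	 * (last byte set to 0x01) is replaced by its CMAC and used as tau_key,
	 * while blocks 1 and 2 (last bytes 0x02 and 0x03) are replaced by
	 * their CMACs and concatenated to form the 32-byte ecb_key.
	 */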
683 | derived_keys = kzalloc(round_up(HEH_BLOCK_SIZE + keylen, | ||
684 | HEH_BLOCK_SIZE), GFP_KERNEL); | ||
685 | if (!derived_keys) | ||
686 | return -ENOMEM; | ||
687 | desc->tfm = cmac; | ||
688 | desc->flags = (crypto_shash_get_flags(cmac) & CRYPTO_TFM_REQ_MASK); | ||
689 | for (i = 0; i < keylen + HEH_BLOCK_SIZE; i += HEH_BLOCK_SIZE) { | ||
690 | derived_keys[i + HEH_BLOCK_SIZE - 1] = | ||
691 | 0x01 + i / HEH_BLOCK_SIZE; | ||
692 | err = crypto_shash_digest(desc, derived_keys + i, | ||
693 | HEH_BLOCK_SIZE, digest); | ||
694 | if (err) | ||
695 | goto out; | ||
696 | memcpy(derived_keys + i, digest, HEH_BLOCK_SIZE); | ||
697 | } | ||
698 | |||
699 | err = crypto_shash_setkey(ctx->poly_hash, derived_keys, HEH_BLOCK_SIZE); | ||
700 | if (err) | ||
701 | goto out; | ||
702 | |||
703 | crypto_ablkcipher_clear_flags(ecb, CRYPTO_TFM_REQ_MASK); | ||
704 | crypto_ablkcipher_set_flags(ecb, crypto_ablkcipher_get_flags(parent) & | ||
705 | CRYPTO_TFM_REQ_MASK); | ||
706 | err = crypto_ablkcipher_setkey(ecb, derived_keys + HEH_BLOCK_SIZE, | ||
707 | keylen); | ||
708 | crypto_ablkcipher_set_flags(parent, crypto_ablkcipher_get_flags(ecb) & | ||
709 | CRYPTO_TFM_RES_MASK); | ||
710 | out: | ||
711 | kzfree(derived_keys); | ||
712 | return err; | ||
713 | } | ||
714 | |||
715 | static int heh_init_tfm(struct crypto_tfm *tfm) | ||
716 | { | ||
717 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | ||
718 | struct heh_instance_ctx *ictx = crypto_instance_ctx(inst); | ||
719 | struct heh_tfm_ctx *ctx = crypto_tfm_ctx(tfm); | ||
720 | struct crypto_shash *cmac; | ||
721 | struct crypto_shash *poly_hash; | ||
722 | struct crypto_ablkcipher *ecb; | ||
723 | unsigned int reqsize; | ||
724 | int err; | ||
725 | |||
726 | cmac = crypto_spawn_shash(&ictx->cmac); | ||
727 | if (IS_ERR(cmac)) | ||
728 | return PTR_ERR(cmac); | ||
729 | |||
730 | poly_hash = crypto_spawn_shash(&ictx->poly_hash); | ||
731 | err = PTR_ERR(poly_hash); | ||
732 | if (IS_ERR(poly_hash)) | ||
733 | goto err_free_cmac; | ||
734 | |||
735 | ecb = crypto_spawn_skcipher(&ictx->ecb); | ||
736 | err = PTR_ERR(ecb); | ||
737 | if (IS_ERR(ecb)) | ||
738 | goto err_free_poly_hash; | ||
739 | |||
740 | ctx->cmac = cmac; | ||
741 | ctx->poly_hash = poly_hash; | ||
742 | ctx->ecb = ecb; | ||
743 | |||
744 | reqsize = crypto_tfm_alg_alignmask(tfm) & | ||
745 | ~(crypto_tfm_ctx_alignment() - 1); | ||
746 | reqsize += max3(offsetof(struct heh_req_ctx, u.cmac.desc) + | ||
747 | sizeof(struct shash_desc) + | ||
748 | crypto_shash_descsize(cmac), | ||
749 | offsetof(struct heh_req_ctx, u.poly_hash.desc) + | ||
750 | sizeof(struct shash_desc) + | ||
751 | crypto_shash_descsize(poly_hash), | ||
752 | offsetof(struct heh_req_ctx, u.ecb.req) + | ||
753 | sizeof(struct ablkcipher_request) + | ||
754 | crypto_ablkcipher_reqsize(ecb)); | ||
755 | tfm->crt_ablkcipher.reqsize = reqsize; | ||
756 | |||
757 | return 0; | ||
758 | |||
759 | err_free_poly_hash: | ||
760 | crypto_free_shash(poly_hash); | ||
761 | err_free_cmac: | ||
762 | crypto_free_shash(cmac); | ||
763 | return err; | ||
764 | } | ||
765 | |||
766 | static void heh_exit_tfm(struct crypto_tfm *tfm) | ||
767 | { | ||
768 | struct heh_tfm_ctx *ctx = crypto_tfm_ctx(tfm); | ||
769 | |||
770 | crypto_free_shash(ctx->cmac); | ||
771 | crypto_free_shash(ctx->poly_hash); | ||
772 | crypto_free_ablkcipher(ctx->ecb); | ||
773 | } | ||
774 | |||
775 | static void heh_free_instance(struct crypto_instance *inst) | ||
776 | { | ||
777 | struct heh_instance_ctx *ctx = crypto_instance_ctx(inst); | ||
778 | |||
779 | crypto_drop_shash(&ctx->cmac); | ||
780 | crypto_drop_shash(&ctx->poly_hash); | ||
781 | crypto_drop_skcipher(&ctx->ecb); | ||
782 | kfree(inst); | ||
783 | } | ||
784 | |||
785 | /* | ||
786 | * Create an instance of HEH as an ablkcipher. | ||
787 | * | ||
788 | * This relies on underlying CMAC and ECB algorithms, usually cmac(aes) and | ||
789 | * ecb(aes). For performance reasons we support asynchronous ECB algorithms. | ||
790 | * However, we do not yet support asynchronous CMAC algorithms because CMAC is | ||
791 | * only used on a small fixed amount of data per request, independent of the | ||
792 | * request length. This would change if AEAD or variable-length nonce support | ||
793 | * were to be exposed. | ||
794 | */ | ||
795 | static int heh_create_common(struct crypto_template *tmpl, struct rtattr **tb, | ||
796 | const char *full_name, const char *cmac_name, | ||
797 | const char *poly_hash_name, const char *ecb_name) | ||
798 | { | ||
799 | struct crypto_attr_type *algt; | ||
800 | struct crypto_instance *inst; | ||
801 | struct heh_instance_ctx *ctx; | ||
802 | struct shash_alg *cmac; | ||
803 | struct shash_alg *poly_hash; | ||
804 | struct crypto_alg *ecb; | ||
805 | int err; | ||
806 | |||
807 | algt = crypto_get_attr_type(tb); | ||
808 | if (IS_ERR(algt)) | ||
809 | return PTR_ERR(algt); | ||
810 | |||
811 | /* User must be asking for something compatible with ablkcipher */ | ||
812 | if ((algt->type ^ CRYPTO_ALG_TYPE_ABLKCIPHER) & algt->mask) | ||
813 | return -EINVAL; | ||
814 | |||
815 | /* Allocate the ablkcipher instance */ | ||
816 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | ||
817 | if (!inst) | ||
818 | return -ENOMEM; | ||
819 | |||
820 | ctx = crypto_instance_ctx(inst); | ||
821 | |||
822 | /* Set up the cmac spawn */ | ||
823 | ctx->cmac.base.inst = inst; | ||
824 | err = crypto_grab_shash(&ctx->cmac, cmac_name, 0, 0); | ||
825 | if (err) | ||
826 | goto err_free_inst; | ||
827 | cmac = crypto_spawn_shash_alg(&ctx->cmac); | ||
828 | err = -EINVAL; | ||
829 | if (cmac->digestsize != HEH_BLOCK_SIZE) | ||
830 | goto err_drop_cmac; | ||
831 | |||
832 | /* Set up the poly_hash spawn */ | ||
833 | ctx->poly_hash.base.inst = inst; | ||
834 | err = crypto_grab_shash(&ctx->poly_hash, poly_hash_name, 0, 0); | ||
835 | if (err) | ||
836 | goto err_drop_cmac; | ||
837 | poly_hash = crypto_spawn_shash_alg(&ctx->poly_hash); | ||
838 | err = -EINVAL; | ||
839 | if (poly_hash->digestsize != HEH_BLOCK_SIZE) | ||
840 | goto err_drop_poly_hash; | ||
841 | |||
842 | /* Set up the ecb spawn */ | ||
843 | ctx->ecb.base.inst = inst; | ||
844 | err = crypto_grab_skcipher(&ctx->ecb, ecb_name, 0, | ||
845 | crypto_requires_sync(algt->type, | ||
846 | algt->mask)); | ||
847 | if (err) | ||
848 | goto err_drop_poly_hash; | ||
849 | ecb = crypto_skcipher_spawn_alg(&ctx->ecb); | ||
850 | |||
851 | /* HEH only supports block ciphers with a 16-byte block size */ | ||
852 | err = -EINVAL; | ||
853 | if (ecb->cra_blocksize != HEH_BLOCK_SIZE) | ||
854 | goto err_drop_ecb; | ||
855 | |||
856 | /* The underlying "ECB" algorithm must not require an IV */ | ||
857 | err = -EINVAL; | ||
858 | if ((ecb->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_BLKCIPHER) { | ||
859 | if (ecb->cra_blkcipher.ivsize != 0) | ||
860 | goto err_drop_ecb; | ||
861 | } else { | ||
862 | if (ecb->cra_ablkcipher.ivsize != 0) | ||
863 | goto err_drop_ecb; | ||
864 | } | ||
865 | |||
866 | /* Set the instance names */ | ||
867 | err = -ENAMETOOLONG; | ||
868 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | ||
869 | "heh_base(%s,%s,%s)", cmac->base.cra_driver_name, | ||
870 | poly_hash->base.cra_driver_name, | ||
871 | ecb->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | ||
872 | goto err_drop_ecb; | ||
873 | |||
874 | err = -ENAMETOOLONG; | ||
875 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, | ||
876 | "%s", full_name) >= CRYPTO_MAX_ALG_NAME) | ||
877 | goto err_drop_ecb; | ||
878 | |||
879 | /* Finish initializing the instance */ | ||
880 | |||
881 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
882 | (ecb->cra_flags & CRYPTO_ALG_ASYNC); | ||
883 | inst->alg.cra_blocksize = HEH_BLOCK_SIZE; | ||
884 | inst->alg.cra_ctxsize = sizeof(struct heh_tfm_ctx); | ||
885 | inst->alg.cra_alignmask = ecb->cra_alignmask | (__alignof__(be128) - 1); | ||
886 | inst->alg.cra_priority = ecb->cra_priority; | ||
887 | inst->alg.cra_type = &crypto_ablkcipher_type; | ||
888 | inst->alg.cra_init = heh_init_tfm; | ||
889 | inst->alg.cra_exit = heh_exit_tfm; | ||
890 | |||
891 | inst->alg.cra_ablkcipher.setkey = heh_setkey; | ||
892 | inst->alg.cra_ablkcipher.encrypt = heh_encrypt; | ||
893 | inst->alg.cra_ablkcipher.decrypt = heh_decrypt; | ||
894 | if ((ecb->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_BLKCIPHER) { | ||
895 | inst->alg.cra_ablkcipher.min_keysize = ecb->cra_blkcipher.min_keysize; | ||
896 | inst->alg.cra_ablkcipher.max_keysize = ecb->cra_blkcipher.max_keysize; | ||
897 | } else { | ||
898 | inst->alg.cra_ablkcipher.min_keysize = ecb->cra_ablkcipher.min_keysize; | ||
899 | inst->alg.cra_ablkcipher.max_keysize = ecb->cra_ablkcipher.max_keysize; | ||
900 | } | ||
901 | inst->alg.cra_ablkcipher.ivsize = HEH_BLOCK_SIZE; | ||
902 | |||
903 | /* Register the instance */ | ||
904 | err = crypto_register_instance(tmpl, inst); | ||
905 | if (err) | ||
906 | goto err_drop_ecb; | ||
907 | return 0; | ||
908 | |||
909 | err_drop_ecb: | ||
910 | crypto_drop_skcipher(&ctx->ecb); | ||
911 | err_drop_poly_hash: | ||
912 | crypto_drop_shash(&ctx->poly_hash); | ||
913 | err_drop_cmac: | ||
914 | crypto_drop_shash(&ctx->cmac); | ||
915 | err_free_inst: | ||
916 | kfree(inst); | ||
917 | return err; | ||
918 | } | ||
919 | |||
920 | static int heh_create(struct crypto_template *tmpl, struct rtattr **tb) | ||
921 | { | ||
922 | const char *cipher_name; | ||
923 | char full_name[CRYPTO_MAX_ALG_NAME]; | ||
924 | char cmac_name[CRYPTO_MAX_ALG_NAME]; | ||
925 | char ecb_name[CRYPTO_MAX_ALG_NAME]; | ||
926 | |||
927 | /* Get the name of the requested block cipher (e.g. aes) */ | ||
928 | cipher_name = crypto_attr_alg_name(tb[1]); | ||
929 | if (IS_ERR(cipher_name)) | ||
930 | return PTR_ERR(cipher_name); | ||
931 | |||
932 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "heh(%s)", cipher_name) >= | ||
933 | CRYPTO_MAX_ALG_NAME) | ||
934 | return -ENAMETOOLONG; | ||
935 | |||
936 | if (snprintf(cmac_name, CRYPTO_MAX_ALG_NAME, "cmac(%s)", cipher_name) >= | ||
937 | CRYPTO_MAX_ALG_NAME) | ||
938 | return -ENAMETOOLONG; | ||
939 | |||
940 | if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", cipher_name) >= | ||
941 | CRYPTO_MAX_ALG_NAME) | ||
942 | return -ENAMETOOLONG; | ||
943 | |||
944 | return heh_create_common(tmpl, tb, full_name, cmac_name, "poly_hash", | ||
945 | ecb_name); | ||
946 | } | ||
947 | |||
948 | static struct crypto_template heh_tmpl = { | ||
949 | .name = "heh", | ||
950 | .create = heh_create, | ||
951 | .free = heh_free_instance, | ||
952 | .module = THIS_MODULE, | ||
953 | }; | ||
954 | |||
955 | static int heh_base_create(struct crypto_template *tmpl, struct rtattr **tb) | ||
956 | { | ||
957 | char full_name[CRYPTO_MAX_ALG_NAME]; | ||
958 | const char *cmac_name; | ||
959 | const char *poly_hash_name; | ||
960 | const char *ecb_name; | ||
961 | |||
962 | cmac_name = crypto_attr_alg_name(tb[1]); | ||
963 | if (IS_ERR(cmac_name)) | ||
964 | return PTR_ERR(cmac_name); | ||
965 | |||
966 | poly_hash_name = crypto_attr_alg_name(tb[2]); | ||
967 | if (IS_ERR(poly_hash_name)) | ||
968 | return PTR_ERR(poly_hash_name); | ||
969 | |||
970 | ecb_name = crypto_attr_alg_name(tb[3]); | ||
971 | if (IS_ERR(ecb_name)) | ||
972 | return PTR_ERR(ecb_name); | ||
973 | |||
974 | if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "heh_base(%s,%s,%s)", | ||
975 | cmac_name, poly_hash_name, ecb_name) >= | ||
976 | CRYPTO_MAX_ALG_NAME) | ||
977 | return -ENAMETOOLONG; | ||
978 | |||
979 | return heh_create_common(tmpl, tb, full_name, cmac_name, poly_hash_name, | ||
980 | ecb_name); | ||
981 | } | ||
982 | |||
983 | /* | ||
984 | * If HEH is instantiated as "heh_base" instead of "heh", then specific | ||
985 | * implementations of cmac, poly_hash, and ecb can be specified instead of just | ||
986 | * the cipher. | ||
987 | */ | ||
988 | static struct crypto_template heh_base_tmpl = { | ||
989 | .name = "heh_base", | ||
990 | .create = heh_base_create, | ||
991 | .free = heh_free_instance, | ||
992 | .module = THIS_MODULE, | ||
993 | }; | ||
994 | |||
995 | static int __init heh_module_init(void) | ||
996 | { | ||
997 | int err; | ||
998 | |||
999 | err = crypto_register_template(&heh_tmpl); | ||
1000 | if (err) | ||
1001 | return err; | ||
1002 | |||
1003 | err = crypto_register_template(&heh_base_tmpl); | ||
1004 | if (err) | ||
1005 | goto out_undo_heh; | ||
1006 | |||
1007 | err = crypto_register_shash(&poly_hash_alg); | ||
1008 | if (err) | ||
1009 | goto out_undo_heh_base; | ||
1010 | |||
1011 | return 0; | ||
1012 | |||
1013 | out_undo_heh_base: | ||
1014 | crypto_unregister_template(&heh_base_tmpl); | ||
1015 | out_undo_heh: | ||
1016 | crypto_unregister_template(&heh_tmpl); | ||
1017 | return err; | ||
1018 | } | ||
1019 | |||
1020 | static void __exit heh_module_exit(void) | ||
1021 | { | ||
1022 | crypto_unregister_template(&heh_tmpl); | ||
1023 | crypto_unregister_template(&heh_base_tmpl); | ||
1024 | crypto_unregister_shash(&poly_hash_alg); | ||
1025 | } | ||
1026 | |||
1027 | module_init(heh_module_init); | ||
1028 | module_exit(heh_module_exit); | ||
1029 | |||
1030 | MODULE_LICENSE("GPL"); | ||
1031 | MODULE_DESCRIPTION("Hash-Encrypt-Hash block cipher mode"); | ||
1032 | MODULE_ALIAS_CRYPTO("heh"); | ||
1033 | MODULE_ALIAS_CRYPTO("heh_base"); | ||