linux-omap-psp 2.6.37: add more recent version of OMAP3 kernel
1 From d27b15fcf6b97acf8657ef6635e7e3ec95961f14 Mon Sep 17 00:00:00 2001
2 From: Greg Turner <gregturner@ti.com>
3 Date: Thu, 16 Jun 2011 09:35:15 -0500
4 Subject: [PATCH] linux-omap3: Add OCF support to 2.6.37 kernel
6 * Patch kernel with OCF (community source)
7 * Modify defconfig to build OCF statically into the kernel
8 * cryptodev.ko is built as a loadable module
9 * cryptodev.h header file moved to include/crypto
11 Signed-off-by: Greg Turner <gregturner@ti.com>
12 ---
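For reference, below is a minimal, hedged sketch of how user space might drive the
cryptodev.ko module built by this patch. It assumes the OCF /dev/crypto ioctl
interface (CRIOGET, CIOCGSESSION, CIOCFSESSION), struct session_op and the
CRYPTO_AES_CBC identifier from the relocated include/crypto/cryptodev.h; none of
these appear in the hunks below, so treat the names as assumptions rather than a
definitive usage example.

/* Sketch only: assumes the OCF /dev/crypto ioctl API from cryptodev.h. */
#include <crypto/cryptodev.h>   /* header relocated by this patch (assumption) */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	unsigned char key[16] = { 0 };          /* demo key only */
	struct session_op sess;
	int devfd, fd = -1;

	devfd = open("/dev/crypto", O_RDWR);
	if (devfd < 0) {
		perror("open /dev/crypto");
		return 1;
	}
	/* BSD-style flow: clone a per-process fd first (assumption). */
	if (ioctl(devfd, CRIOGET, &fd) < 0) {
		perror("CRIOGET");
		close(devfd);
		return 1;
	}

	memset(&sess, 0, sizeof(sess));
	sess.cipher = CRYPTO_AES_CBC;           /* algorithm id (assumption) */
	sess.keylen = sizeof(key);
	sess.key    = (void *)key;
	if (ioctl(fd, CIOCGSESSION, &sess) < 0)
		perror("CIOCGSESSION");
	else
		ioctl(fd, CIOCFSESSION, &sess.ses);

	close(fd);
	close(devfd);
	return 0;
}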
13  crypto/Kconfig             |    3 +
14  crypto/Makefile            |    2 +
15  crypto/ocf/ChangeLog       |  205 +++++
16  crypto/ocf/Config.in       |   36 +
17  crypto/ocf/Kconfig         |  119 +++
18  crypto/ocf/Makefile        |  124 +++
19  crypto/ocf/criov.c         |  215 ++++++
20  crypto/ocf/crypto.c        | 1784 ++++++++++++++++++++++++++++++++++++++++++++
21  crypto/ocf/cryptodev.c     | 1063 ++++++++++++++++++++++++++
22  crypto/ocf/cryptosoft.c    | 1210 ++++++++++++++++++++++++++++++
23  crypto/ocf/ocf-bench.c     |  436 +++++++++++
24  crypto/ocf/ocf-compat.h    |  294 ++++++++
25  crypto/ocf/random.c        |  322 ++++++++
26  crypto/ocf/rndtest.c       |  300 ++++++++
27  crypto/ocf/rndtest.h       |   54 ++
28  crypto/ocf/uio.h           |   54 ++
29  drivers/char/random.c      |   65 ++
30  fs/fcntl.c                 |    1 +
31  include/crypto/cryptodev.h |  479 ++++++++++++
32  include/linux/miscdevice.h |    1 +
33  include/linux/random.h     |   29 +
34  kernel/pid.c               |    1 +
35  22 files changed, 6797 insertions(+), 0 deletions(-)
36  create mode 100644 crypto/ocf/ChangeLog
37  create mode 100644 crypto/ocf/Config.in
38  create mode 100644 crypto/ocf/Kconfig
39  create mode 100644 crypto/ocf/Makefile
40  create mode 100644 crypto/ocf/criov.c
41  create mode 100644 crypto/ocf/crypto.c
42  create mode 100644 crypto/ocf/cryptodev.c
43  create mode 100644 crypto/ocf/cryptosoft.c
44  create mode 100644 crypto/ocf/ocf-bench.c
45  create mode 100644 crypto/ocf/ocf-compat.h
46  create mode 100644 crypto/ocf/random.c
47  create mode 100644 crypto/ocf/rndtest.c
48  create mode 100644 crypto/ocf/rndtest.h
49  create mode 100644 crypto/ocf/uio.h
50  create mode 100644 include/crypto/cryptodev.h
52 diff --git a/crypto/Kconfig b/crypto/Kconfig
53 index e4bac29..3154b22 100644
54 --- a/crypto/Kconfig
55 +++ b/crypto/Kconfig
56 @@ -844,3 +844,6 @@ config CRYPTO_ANSI_CPRNG
57  source "drivers/crypto/Kconfig"
58  
59  endif  # if CRYPTO
60 +
61 +source "crypto/ocf/Kconfig"
62 +
63 diff --git a/crypto/Makefile b/crypto/Makefile
64 index 423b7de..14ec6e9 100644
65 --- a/crypto/Makefile
66 +++ b/crypto/Makefile
67 @@ -86,6 +86,8 @@ obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
68  obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
69  obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
70  
71 +obj-$(CONFIG_OCF_OCF) += ocf/
72 +
73  #
74  # generic algorithms and the async_tx api
75  #
76 diff --git a/crypto/ocf/ChangeLog b/crypto/ocf/ChangeLog
77 new file mode 100644
78 index 0000000..8101340
79 --- /dev/null
80 +++ b/crypto/ocf/ChangeLog
81 @@ -0,0 +1,205 @@
82 +2009-08-20 19:05  toby
83 +
84 +       * cryptocteon/Makefile, ep80579/Makefile:
85 +       Add some makefile
86 +       changes to let a linux-2.4 build from head work when doing 'make
87 +       dep'.
88 +
89 +2009-08-12 21:42  davidm
90 +
91 +       * cryptodev.c:
92 +       
93 +       if crypto_dispatch fails,  make sure we bail immediately rather
94 +       than go on to do other error checking that we should only do if
95 +       crypto_dispatch worked.
96 +
97 +2009-08-12 21:30  davidm
98 +
99 +       * crypto.c:
100 +       
101 +       To avoid a race while we have the Q's unlocked we must stage the
102 +       blocking of the driver, so,  we assume the driver to be blocked, 
103 +       but if any unblocks come in while we are unlocked,  we do not block
104 +       the driver.
105 +       
106 +       At worst this means we will try again and get another block.
107 +       
108 +       Without this change we can lose an unblock request and give up on
109 +       the driver.
111 +2009-08-12 21:04  davidm
113 +       * crypto.c:
114 +       
115 +       kill_proc needs to be defined for kernels at or after 2.6.27
117 +2009-08-12 21:03  davidm
119 +       * ep80579/environment.mk:
120 +       
121 +       Allow OCF to be with the kernel,  or within the uClinux-dist
122 +       modules dir.
124 +2009-08-12 20:52  davidm
126 +       * ocf-compat.h:
127 +       
128 +       if IS_ERR_VALUE is not defined,  try and get it,  if it doesn't
129 +       exist, make our own.
131 +2009-05-27 01:52  davidm
133 +       * cryptodev.c:
134 +       
135 +       Order a crd combination of cipher/hmac just as ipsec would.  That
136 +       way it's easier to test results against cryptosoft and the HW
137 +       drivers that automatically fix this.
138 +       
139 +       cryptosoft processes algs in crd order,  thus they get different
140 +       results when compared to cryptosoft.
141 +       
142 +       encrypt = cipher then hmac decrypt = hmac then cipher
144 +2009-05-27 01:48  davidm
146 +       * ocf-compat.h:
147 +       
148 +       Make sure we have a late_initcall macro even on old systems.
150 +2009-05-27 01:47  davidm
152 +       * cryptosoft.c:
153 +       
154 +       make sure that cryptosoft is started after the kernel crypto, 
155 +       otherwise it will not detect the kernel's algs correctly.
157 +2009-05-27 01:41  davidm
159 +       * cryptocteon/: cavium_crypto.c, cryptocteon.c:
160 +       
161 +       aes/des/3des + md5/sha1 working with ipsec.
163 +2009-05-20 19:11  davidm
165 +       * cryptocteon/: README.txt, cavium_crypto.c, cryptocteon.c:
166 +       
167 +       Complete rework of the driver to setup a framework that can handle
168 +       all the combinations we need properly.
169 +       
170 +       Currently only DES is working.  Need to knock out a DES-MD5 version
171 +       so we can bench ipsec, then cleanup the AES/SHA bits.
173 +2009-05-14 20:53  asallawa
175 +       * ixp4xx/ixp4xx.c:
176 +       
177 +       Remove call to flush_scheduled_work(). Doesn't appear to be
178 +       necessary and can cause 'scheduling while atomic' kernel errors.
180 +2009-05-14 01:17  asallawa
182 +       * crypto.c:
183 +       
184 +       In particular situations, the crypto kernel thread can run forever
185 +       without ever calling wait_event_interruptible() which meant that it
186 +       could starve the rest of the system of CPU time.  To fix this, set
187 +       a maximum number of crypto operations to do consecutively before
188 +       calling schedule(). Default value is 1000, which should be less
189 +       than a second on most of our platforms.
191 +2009-05-08 20:10  davidm
193 +       * crypto.c:
194 +       
195 +       Currently we are queuing requests that fail for callback.  The
196 +       callback will never happen because the request failed.  This can
197 +       cause "busy loops" in the crypto support threads.
198 +       
199 +       Make sure we track the error status when processing a request to
200 +       the driver and only return 0 if everything worked.
201 +       
202 +       The original code had this right,  just bad locking,  the locking
203 +       fixes broke the error reporting.
205 +2009-03-31 20:41  davidm
207 +       * crypto.c:
208 +       
209 +       We cannot use sys_kill (not exported),  so switch to using send_sig
210 +       and the appropriate task finding functions.
212 +2009-03-27 03:50  gerg
214 +       * crypto.c:
215 +       
216 +       From linux-2.6.29 onwards there is no kill_proc() routine.  On
217 +       these newer systems call direct to the sys_kill() system call.
219 +2009-03-14 07:00  davidm
221 +       * ep80579/Makefile:
222 +       
223 +       merge makefile fix from head so that make clean works properly.
225 +2009-03-10 22:57  davidm
227 +       * hifn/hifn7751.c:
228 +       
229 +       Fix up cache line size for hifns,  not all get it right.  Patch
230 +       suggested and tested by Donald Lai <dlai@hifn.com>.
232 +2009-03-10 21:36  davidm
234 +       * ep80579/Makefile:
235 +       
236 +       Make sure we do not break non-tolapai builds (when running clean
237 +       etc)
239 +2009-02-07 00:32  davidm
241 +       * Kconfig, Makefile, cryptocteon/Makefile, cryptocteon/README.txt,
242 +       cryptocteon/cavium_crypto.c, cryptocteon/cryptocteon.c:
243 +       
244 +       First pass slightly working (3des/aes) crypto driver for the cavium
245 +       5010.
246 +       
247 +       Still a fair bit needed here (hmac+cipher and hash processing). 
248 +       Need to revisit the faster cavium example ipsec code.
250 +2009-01-20 01:06  gerg
252 +       * cryptosoft.c:
253 +       
254 +       Include linux/scatterlist.h instead of asm/scatterlist.h. Otherwise
255 +       we don't get the generic sg macros (only the asm specific
256 +       definitions).
258 +2009-01-16 20:05  davidm
260 +       * cryptosoft.c:
261 +       
262 +       clean up some debug output (sign extension of chars)
264 +2008-11-20 22:50  davidm
266 +       * ocf-compat.h:
267 +       
268 +       We need to include mm.h to get page_address on some kernels. 
269 +       reported by Paul Wouters <paul@xelerance.com>
271 +2008-11-14 06:14  davidm
273 +       * patches/linux-2.6.26-ocf.patch:
274 +       
275 +       make sure we patch into the kernel properly.  Paul Wouters
276 +       <paul@xelerance.com>
278 +2008-09-20 02:20  davidm
280 +       * crypto.c, cryptodev.h:
281 +       
282 +       Fix up session migration so that a driver can be removed while
283 +       ipsec is up and running and the tunnel will auto-migrate to a
284 +       backup driver (ie., cryptosoft).
285 +       
286 +       Brad Vrabete <brad.vrabete@intel.com>
287 diff --git a/crypto/ocf/Config.in b/crypto/ocf/Config.in
288 new file mode 100644
289 index 0000000..d722cba
290 --- /dev/null
291 +++ b/crypto/ocf/Config.in
292 @@ -0,0 +1,36 @@
293 +#############################################################################
295 +mainmenu_option next_comment
296 +comment 'OCF Configuration'
297 +tristate 'OCF (Open Cryptographic Framework)' CONFIG_OCF_OCF
298 +dep_mbool '  enable fips RNG checks (fips check on RNG data before use)' \
299 +                               CONFIG_OCF_FIPS $CONFIG_OCF_OCF
300 +dep_mbool '  enable harvesting entropy for /dev/random' \
301 +                               CONFIG_OCF_RANDOMHARVEST $CONFIG_OCF_OCF
302 +dep_tristate '  cryptodev (user space support)' \
303 +                               CONFIG_OCF_CRYPTODEV $CONFIG_OCF_OCF
304 +dep_tristate '  cryptosoft (software crypto engine)' \
305 +                               CONFIG_OCF_CRYPTOSOFT $CONFIG_OCF_OCF
306 +dep_tristate '  safenet (HW crypto engine)' \
307 +                               CONFIG_OCF_SAFE $CONFIG_OCF_OCF
308 +dep_tristate '  IXP4xx (HW crypto engine)' \
309 +                               CONFIG_OCF_IXP4XX $CONFIG_OCF_OCF
310 +dep_mbool    '  Enable IXP4xx HW to perform SHA1 and MD5 hashing (very slow)' \
311 +                               CONFIG_OCF_IXP4XX_SHA1_MD5 $CONFIG_OCF_IXP4XX
312 +dep_tristate '  hifn (HW crypto engine)' \
313 +                               CONFIG_OCF_HIFN $CONFIG_OCF_OCF
314 +dep_tristate '  talitos (HW crypto engine)' \
315 +                               CONFIG_OCF_TALITOS $CONFIG_OCF_OCF
316 +dep_tristate '  pasemi (HW crypto engine)' \
317 +                               CONFIG_OCF_PASEMI $CONFIG_OCF_OCF
318 +dep_tristate '  ep80579 (HW crypto engine)' \
319 +                               CONFIG_OCF_EP80579 $CONFIG_OCF_OCF
320 +dep_tristate '  Micronas c7108 (HW crypto engine)' \
321 +                               CONFIG_OCF_C7108 $CONFIG_OCF_OCF
322 +dep_tristate '  ocfnull (does no crypto)' \
323 +                               CONFIG_OCF_OCFNULL $CONFIG_OCF_OCF
324 +dep_tristate '  ocf-bench (HW crypto in-kernel benchmark)' \
325 +                               CONFIG_OCF_BENCH $CONFIG_OCF_OCF
326 +endmenu
328 +#############################################################################
329 diff --git a/crypto/ocf/Kconfig b/crypto/ocf/Kconfig
330 new file mode 100644
331 index 0000000..b9c24ff
332 --- /dev/null
333 +++ b/crypto/ocf/Kconfig
334 @@ -0,0 +1,119 @@
335 +menu "OCF Configuration"
337 +config OCF_OCF
338 +       tristate "OCF (Open Cryptographic Framework)"
339 +       help
340 +         A linux port of the OpenBSD/FreeBSD crypto framework.
342 +config OCF_RANDOMHARVEST
343 +       bool "crypto random --- harvest entropy for /dev/random"
344 +       depends on OCF_OCF
345 +       help
346 +         Includes code to harvest random numbers from devices that support it.
348 +config OCF_FIPS
349 +       bool "enable fips RNG checks"
350 +       depends on OCF_OCF && OCF_RANDOMHARVEST
351 +       help
352 +         Run all RNG provided data through a fips check before
353 +         adding it to /dev/random's entropy pool.
355 +config OCF_CRYPTODEV
356 +       tristate "cryptodev (user space support)"
357 +       depends on OCF_OCF
358 +       help
359 +         The user space API to access crypto hardware.
361 +config OCF_CRYPTOSOFT
362 +       tristate "cryptosoft (software crypto engine)"
363 +       depends on OCF_OCF
364 +       help
365 +         A software driver for the OCF framework that uses
366 +         the kernel CryptoAPI.
368 +config OCF_SAFE
369 +       tristate "safenet (HW crypto engine)"
370 +       depends on OCF_OCF
371 +       help
372 +         A driver for a number of the safenet Excel crypto accelerators.
373 +         Currently tested and working on the 1141 and 1741.
375 +config OCF_IXP4XX
376 +       tristate "IXP4xx (HW crypto engine)"
377 +       depends on OCF_OCF
378 +       help
379 +         XScale IXP4xx crypto accelerator driver.  Requires the
380 +         Intel Access library.
382 +config OCF_IXP4XX_SHA1_MD5
383 +       bool "IXP4xx SHA1 and MD5 Hashing"
384 +       depends on OCF_IXP4XX
385 +       help
386 +         Allows the IXP4xx crypto accelerator to perform SHA1 and MD5 hashing.
387 +         Note: this is MUCH slower than using cryptosoft (software crypto engine).
389 +config OCF_HIFN
390 +       tristate "hifn (HW crypto engine)"
391 +       depends on OCF_OCF
392 +       help
393 +         OCF driver for various HIFN based crypto accelerators.
394 +         (7951, 7955, 7956, 7751, 7811)
396 +config OCF_HIFNHIPP
397 +       tristate "Hifn HIPP (HW packet crypto engine)"
398 +       depends on OCF_OCF
399 +       help
400 +         OCF driver for various HIFN (HIPP) based crypto accelerators
401 +         (7855)
403 +config OCF_TALITOS
404 +       tristate "talitos (HW crypto engine)"
405 +       depends on OCF_OCF
406 +       help
407 +         OCF driver for Freescale's security engine (SEC/talitos).
409 +config OCF_PASEMI
410 +       tristate "pasemi (HW crypto engine)"
411 +       depends on OCF_OCF && PPC_PASEMI
412 +       help
413 +         OCF driver for the PA Semi PWRficient DMA Engine
415 +config OCF_EP80579
416 +       tristate "ep80579 (HW crypto engine)"
417 +       depends on OCF_OCF
418 +       help
419 +         OCF driver for the Intel EP80579 Integrated Processor Product Line.
421 +config OCF_CRYPTOCTEON
422 +       tristate "cryptocteon (HW crypto engine)"
423 +       depends on OCF_OCF
424 +       help
425 +         OCF driver for the Cavium OCTEON Processors.
427 +config OCF_KIRKWOOD
428 +       tristate "kirkwood (HW crypto engine)"
429 +       depends on OCF_OCF
430 +       help
431 +         OCF driver for the Marvell Kirkwood (88F6xxx) Processors.
433 +config OCF_C7108
434 +       tristate "Micronas 7108 (HW crypto engine)"
435 +       depends on OCF_OCF
436 +       help
437 +         OCF driver for the Micronas 7108 Cipher processors.
439 +config OCF_OCFNULL
440 +       tristate "ocfnull (fake crypto engine)"
441 +       depends on OCF_OCF
442 +       help
443 +         OCF driver for measuring ipsec overheads (does no crypto)
445 +config OCF_BENCH
446 +       tristate "ocf-bench (HW crypto in-kernel benchmark)"
447 +       depends on OCF_OCF
448 +       help
449 +         A very simple encryption test for the in-kernel interface
450 +         of OCF.  Also includes code to benchmark the IXP Access library
451 +         for comparison.
453 +endmenu
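The OCF_BENCH entry above exercises OCF's in-kernel interface; the fragment below
is a hedged sketch of that interface's session setup, using only the
crypto_newsession()/crypto_freesession() entry points added later in this patch in
crypto/ocf/crypto.c. The struct cryptoini field names (cri_alg, cri_klen, cri_key),
the CRYPTO_DES_CBC identifier and the key length being given in bits all come from
cryptodev.h and are assumptions here, not something this hunk shows.

/* Sketch only: in-kernel OCF session create/free under the assumptions above. */
#include <linux/string.h>
#include <crypto/cryptodev.h>

static int ocf_session_demo(void)
{
	static unsigned char demo_key[8];       /* demo key only */
	struct cryptoini cri;
	u_int64_t sid;
	int err;

	memset(&cri, 0, sizeof(cri));
	cri.cri_alg  = CRYPTO_DES_CBC;          /* algorithm id (assumption) */
	cri.cri_klen = sizeof(demo_key) * 8;    /* key length in bits (assumption) */
	cri.cri_key  = (caddr_t)demo_key;

	/* Prefer hardware, fall back to software, mirroring crypto_select_driver(). */
	err = crypto_newsession(&sid, &cri,
	                        CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
	if (err)
		return err;

	/* A real user would now build a struct cryptop and crypto_dispatch() it. */

	return crypto_freesession(sid);
}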
454 diff --git a/crypto/ocf/Makefile b/crypto/ocf/Makefile
455 new file mode 100644
456 index 0000000..fa951f4
457 --- /dev/null
458 +++ b/crypto/ocf/Makefile
459 @@ -0,0 +1,124 @@
460 +# for SGlinux builds
461 +-include $(ROOTDIR)/modules/.config
463 +OCF_OBJS = crypto.o criov.o
465 +ifdef CONFIG_OCF_RANDOMHARVEST
466 +       OCF_OBJS += random.o
467 +endif
469 +ifdef CONFIG_OCF_FIPS
470 +       OCF_OBJS += rndtest.o
471 +endif
473 +# Add in autoconf.h to get #defines for CONFIG_xxx
474 +AUTOCONF_H=$(ROOTDIR)/modules/autoconf.h
475 +ifeq ($(AUTOCONF_H), $(wildcard $(AUTOCONF_H)))
476 +       EXTRA_CFLAGS += -include $(AUTOCONF_H)
477 +       export EXTRA_CFLAGS
478 +endif
480 +ifndef obj
481 +       obj ?= .
482 +       _obj = subdir
483 +       mod-subdirs := safe hifn ixp4xx talitos ocfnull
484 +       export-objs += crypto.o criov.o random.o
485 +       list-multi += ocf.o
486 +       _slash :=
487 +else
488 +       _obj = obj
489 +       _slash := /
490 +endif
492 +EXTRA_CFLAGS += -I$(obj)/.
494 +obj-$(CONFIG_OCF_OCF)         += ocf.o
495 +obj-$(CONFIG_OCF_CRYPTODEV)   += cryptodev.o
496 +obj-$(CONFIG_OCF_CRYPTOSOFT)  += cryptosoft.o
497 +obj-$(CONFIG_OCF_BENCH)       += ocf-bench.o
499 +$(_obj)-$(CONFIG_OCF_SAFE)    += safe$(_slash)
500 +$(_obj)-$(CONFIG_OCF_HIFN)    += hifn$(_slash)
501 +$(_obj)-$(CONFIG_OCF_IXP4XX)  += ixp4xx$(_slash)
502 +$(_obj)-$(CONFIG_OCF_TALITOS) += talitos$(_slash)
503 +$(_obj)-$(CONFIG_OCF_PASEMI)  += pasemi$(_slash)
504 +$(_obj)-$(CONFIG_OCF_EP80579) += ep80579$(_slash)
505 +$(_obj)-$(CONFIG_OCF_CRYPTOCTEON) += cryptocteon$(_slash)
506 +$(_obj)-$(CONFIG_OCF_KIRKWOOD) += kirkwood$(_slash)
507 +$(_obj)-$(CONFIG_OCF_OCFNULL) += ocfnull$(_slash)
508 +$(_obj)-$(CONFIG_OCF_C7108) += c7108$(_slash)
510 +ocf-objs := $(OCF_OBJS)
512 +$(list-multi) dummy1: $(ocf-objs)
513 +       $(LD) -r -o $@ $(ocf-objs)
515 +.PHONY:
516 +clean:
517 +       rm -f *.o *.ko .*.o.flags .*.ko.cmd .*.o.cmd .*.mod.o.cmd *.mod.c
518 +       rm -f */*.o */*.ko */.*.o.cmd */.*.ko.cmd */.*.mod.o.cmd */*.mod.c */.*.o.flags
520 +ifdef TOPDIR
521 +-include $(TOPDIR)/Rules.make
522 +endif
524 +#
525 +# release gen targets
526 +#
528 +.PHONY: patch
529 +patch:
530 +       REL=`date +%Y%m%d`; \
531 +               patch=ocf-linux-$$REL.patch; \
532 +               patch24=ocf-linux-24-$$REL.patch; \
533 +               patch26=ocf-linux-26-$$REL.patch; \
534 +               ( \
535 +                       find . -name Makefile; \
536 +                       find . -name Config.in; \
537 +                       find . -name Kconfig; \
538 +                       find . -name README; \
539 +                       find . -name '*.[ch]' | grep -v '.mod.c'; \
540 +               ) | while read t; do \
541 +                       diff -Nau /dev/null $$t | sed 's?^+++ \./?+++ linux/crypto/ocf/?'; \
542 +               done > $$patch; \
543 +               cat patches/linux-2.4.35-ocf.patch $$patch > $$patch24; \
544 +               cat patches/linux-2.6.33-ocf.patch $$patch > $$patch26
546 +.PHONY: tarball
547 +tarball:
548 +       REL=`date +%Y%m%d`; RELDIR=/tmp/ocf-linux-$$REL; \
549 +               CURDIR=`pwd`; \
550 +               rm -rf /tmp/ocf-linux-$$REL*; \
551 +               mkdir -p $$RELDIR/tools; \
552 +               cp README* $$RELDIR; \
553 +               cp patches/openss*.patch $$RELDIR; \
554 +               cp patches/crypto-tools.patch $$RELDIR; \
555 +               cp tools/[!C]* $$RELDIR/tools; \
556 +               cd ..; \
557 +               tar cvf $$RELDIR/ocf-linux.tar \
558 +                                       --exclude=CVS \
559 +                                       --exclude=.* \
560 +                                       --exclude=*.o \
561 +                                       --exclude=*.ko \
562 +                                       --exclude=*.mod.* \
563 +                                       --exclude=README* \
564 +                                       --exclude=ocf-*.patch \
565 +                                       --exclude=ocf/patches/openss*.patch \
566 +                                       --exclude=ocf/patches/crypto-tools.patch \
567 +                                       --exclude=ocf/tools \
568 +                                       ocf; \
569 +               gzip -9 $$RELDIR/ocf-linux.tar; \
570 +               cd /tmp; \
571 +               tar cvf ocf-linux-$$REL.tar ocf-linux-$$REL; \
572 +               gzip -9 ocf-linux-$$REL.tar; \
573 +               cd $$CURDIR/../../user; \
574 +               rm -rf /tmp/crypto-tools-$$REL*; \
575 +               tar cvf /tmp/crypto-tools-$$REL.tar \
576 +                                       --exclude=CVS \
577 +                                       --exclude=.* \
578 +                                       --exclude=*.o \
579 +                                       --exclude=cryptotest \
580 +                                       --exclude=cryptokeytest \
581 +                                       crypto-tools; \
582 +               gzip -9 /tmp/crypto-tools-$$REL.tar
584 diff --git a/crypto/ocf/criov.c b/crypto/ocf/criov.c
585 new file mode 100644
586 index 0000000..5fe9040
587 --- /dev/null
588 +++ b/crypto/ocf/criov.c
589 @@ -0,0 +1,215 @@
590 +/*      $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $        */
592 +/*
593 + * Linux port done by David McCullough <david_mccullough@mcafee.com>
594 + * Copyright (C) 2006-2010 David McCullough
595 + * Copyright (C) 2004-2005 Intel Corporation.
596 + * The license and original author are listed below.
597 + *
598 + * Copyright (c) 1999 Theo de Raadt
599 + *
600 + * Redistribution and use in source and binary forms, with or without
601 + * modification, are permitted provided that the following conditions
602 + * are met:
603 + *
604 + * 1. Redistributions of source code must retain the above copyright
605 + *   notice, this list of conditions and the following disclaimer.
606 + * 2. Redistributions in binary form must reproduce the above copyright
607 + *   notice, this list of conditions and the following disclaimer in the
608 + *   documentation and/or other materials provided with the distribution.
609 + * 3. The name of the author may not be used to endorse or promote products
610 + *   derived from this software without specific prior written permission.
611 + *
612 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
613 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
614 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
615 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
616 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
617 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
618 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
619 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
620 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
621 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
622 + *
623 +__FBSDID("$FreeBSD: src/sys/opencrypto/criov.c,v 1.5 2006/06/04 22:15:13 pjd Exp $");
624 + */
626 +#ifndef AUTOCONF_INCLUDED
627 +#include <linux/config.h>
628 +#endif
629 +#include <linux/module.h>
630 +#include <linux/init.h>
631 +#include <linux/slab.h>
632 +#include <linux/uio.h>
633 +#include <linux/skbuff.h>
634 +#include <linux/kernel.h>
635 +#include <linux/mm.h>
636 +#include <asm/io.h>
638 +#include <uio.h>
639 +#include <crypto/cryptodev.h>
641 +/*
642 + * This macro is only for avoiding code duplication, as we need to skip
643 + * given number of bytes in the same way in three functions below.
644 + */
645 +#define        CUIO_SKIP()     do {                                            \
646 +       KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));           \
647 +       KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));           \
648 +       while (off > 0) {                                               \
649 +               KASSERT(iol >= 0, ("%s: empty in skip", __func__));     \
650 +               if (off < iov->iov_len)                                 \
651 +                       break;                                          \
652 +               off -= iov->iov_len;                                    \
653 +               iol--;                                                  \
654 +               iov++;                                                  \
655 +       }                                                               \
656 +} while (0)
658 +void
659 +cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
660 +{
661 +       struct iovec *iov = uio->uio_iov;
662 +       int iol = uio->uio_iovcnt;
663 +       unsigned count;
665 +       CUIO_SKIP();
666 +       while (len > 0) {
667 +               KASSERT(iol >= 0, ("%s: empty", __func__));
668 +               count = min((int)(iov->iov_len - off), len);
669 +               memcpy(cp, ((caddr_t)iov->iov_base) + off, count);
670 +               len -= count;
671 +               cp += count;
672 +               off = 0;
673 +               iol--;
674 +               iov++;
675 +       }
676 +}
678 +void
679 +cuio_copyback(struct uio* uio, int off, int len, caddr_t cp)
680 +{
681 +       struct iovec *iov = uio->uio_iov;
682 +       int iol = uio->uio_iovcnt;
683 +       unsigned count;
685 +       CUIO_SKIP();
686 +       while (len > 0) {
687 +               KASSERT(iol >= 0, ("%s: empty", __func__));
688 +               count = min((int)(iov->iov_len - off), len);
689 +               memcpy(((caddr_t)iov->iov_base) + off, cp, count);
690 +               len -= count;
691 +               cp += count;
692 +               off = 0;
693 +               iol--;
694 +               iov++;
695 +       }
696 +}
698 +/*
699 + * Return a pointer to iov/offset of location in iovec list.
700 + */
701 +struct iovec *
702 +cuio_getptr(struct uio *uio, int loc, int *off)
703 +{
704 +       struct iovec *iov = uio->uio_iov;
705 +       int iol = uio->uio_iovcnt;
707 +       while (loc >= 0) {
708 +               /* Normal end of search */
709 +               if (loc < iov->iov_len) {
710 +                       *off = loc;
711 +                       return (iov);
712 +               }
714 +               loc -= iov->iov_len;
715 +               if (iol == 0) {
716 +                       if (loc == 0) {
717 +                               /* Point at the end of valid data */
718 +                               *off = iov->iov_len;
719 +                               return (iov);
720 +                       } else
721 +                               return (NULL);
722 +               } else {
723 +                       iov++, iol--;
724 +               }
725 +       }
727 +       return (NULL);
728 +}
730 +EXPORT_SYMBOL(cuio_copyback);
731 +EXPORT_SYMBOL(cuio_copydata);
732 +EXPORT_SYMBOL(cuio_getptr);
735 +static void
736 +skb_copy_bits_back(struct sk_buff *skb, int offset, caddr_t cp, int len)
737 +{
738 +       int i;
739 +       if (offset < skb_headlen(skb)) {
740 +               memcpy(skb->data + offset, cp, min_t(int, skb_headlen(skb), len));
741 +               len -= skb_headlen(skb);
742 +               cp += skb_headlen(skb);
743 +       }
744 +       offset -= skb_headlen(skb);
745 +       for (i = 0; len > 0 && i < skb_shinfo(skb)->nr_frags; i++) {
746 +               if (offset < skb_shinfo(skb)->frags[i].size) {
747 +                       memcpy(page_address(skb_shinfo(skb)->frags[i].page) +
748 +                                       skb_shinfo(skb)->frags[i].page_offset,
749 +                                       cp, min_t(int, skb_shinfo(skb)->frags[i].size, len));
750 +                       len -= skb_shinfo(skb)->frags[i].size;
751 +                       cp += skb_shinfo(skb)->frags[i].size;
752 +               }
753 +               offset -= skb_shinfo(skb)->frags[i].size;
754 +       }
755 +}
757 +void
758 +crypto_copyback(int flags, caddr_t buf, int off, int size, caddr_t in)
759 +{
761 +       if ((flags & CRYPTO_F_SKBUF) != 0)
762 +               skb_copy_bits_back((struct sk_buff *)buf, off, in, size);
763 +       else if ((flags & CRYPTO_F_IOV) != 0)
764 +               cuio_copyback((struct uio *)buf, off, size, in);
765 +       else
766 +               bcopy(in, buf + off, size);
767 +}
769 +void
770 +crypto_copydata(int flags, caddr_t buf, int off, int size, caddr_t out)
771 +{
773 +       if ((flags & CRYPTO_F_SKBUF) != 0)
774 +               skb_copy_bits((struct sk_buff *)buf, off, out, size);
775 +       else if ((flags & CRYPTO_F_IOV) != 0)
776 +               cuio_copydata((struct uio *)buf, off, size, out);
777 +       else
778 +               bcopy(buf + off, out, size);
779 +}
781 +int
782 +crypto_apply(int flags, caddr_t buf, int off, int len,
783 +    int (*f)(void *, void *, u_int), void *arg)
784 +{
785 +#if 0
786 +       int error;
788 +       if ((flags & CRYPTO_F_SKBUF) != 0)
789 +               error = XXXXXX((struct mbuf *)buf, off, len, f, arg);
790 +       else if ((flags & CRYPTO_F_IOV) != 0)
791 +               error = cuio_apply((struct uio *)buf, off, len, f, arg);
792 +       else
793 +               error = (*f)(arg, buf + off, len);
794 +       return (error);
795 +#else
796 +       KASSERT(0, ("crypto_apply not implemented!\n"));
797 +#endif
798 +       return 0;
799 +}
801 +EXPORT_SYMBOL(crypto_copyback);
802 +EXPORT_SYMBOL(crypto_copydata);
803 +EXPORT_SYMBOL(crypto_apply);
805 diff --git a/crypto/ocf/crypto.c b/crypto/ocf/crypto.c
806 new file mode 100644
807 index 0000000..9e60ac9
808 --- /dev/null
809 +++ b/crypto/ocf/crypto.c
810 @@ -0,0 +1,1784 @@
811 +/*-
812 + * Linux port done by David McCullough <david_mccullough@mcafee.com>
813 + * Copyright (C) 2006-2010 David McCullough
814 + * Copyright (C) 2004-2005 Intel Corporation.
815 + * The license and original author are listed below.
816 + *
817 + * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
818 + *
819 + * Redistribution and use in source and binary forms, with or without
820 + * modification, are permitted provided that the following conditions
821 + * are met:
822 + * 1. Redistributions of source code must retain the above copyright
823 + *    notice, this list of conditions and the following disclaimer.
824 + * 2. Redistributions in binary form must reproduce the above copyright
825 + *    notice, this list of conditions and the following disclaimer in the
826 + *    documentation and/or other materials provided with the distribution.
827 + *
828 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
829 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
830 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
831 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
832 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
833 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
834 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
835 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
836 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
837 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
838 + */
840 +#if 0
841 +#include <sys/cdefs.h>
842 +__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.27 2007/03/21 03:42:51 sam Exp $");
843 +#endif
845 +/*
846 + * Cryptographic Subsystem.
847 + *
848 + * This code is derived from the OpenBSD Cryptographic Framework (OCF)
849 + * that has the copyright shown below.  Very little of the original
850 + * code remains.
851 + */
852 +/*-
853 + * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
854 + *
855 + * This code was written by Angelos D. Keromytis in Athens, Greece, in
856 + * February 2000. Network Security Technologies Inc. (NSTI) kindly
857 + * supported the development of this code.
858 + *
859 + * Copyright (c) 2000, 2001 Angelos D. Keromytis
860 + *
861 + * Permission to use, copy, and modify this software with or without fee
862 + * is hereby granted, provided that this entire notice is included in
863 + * all source code copies of any software which is or includes a copy or
864 + * modification of this software.
865 + *
866 + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
867 + * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
868 + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
869 + * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
870 + * PURPOSE.
871 + *
872 +__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.16 2005/01/07 02:29:16 imp Exp $");
873 + */
876 +#ifndef AUTOCONF_INCLUDED
877 +#include <linux/config.h>
878 +#endif
879 +#include <linux/module.h>
880 +#include <linux/init.h>
881 +#include <linux/list.h>
882 +#include <linux/slab.h>
883 +#include <linux/wait.h>
884 +#include <linux/sched.h>
885 +#include <linux/spinlock.h>
886 +#include <linux/version.h>
887 +#include <crypto/cryptodev.h>
889 +/*
890 + * keep track of whether or not we have been initialised, a big
891 + * issue if we are linked into the kernel and a driver gets started before
892 + * us
893 + */
894 +static int crypto_initted = 0;
896 +/*
897 + * Crypto drivers register themselves by allocating a slot in the
898 + * crypto_drivers table with crypto_get_driverid() and then registering
899 + * each algorithm they support with crypto_register() and crypto_kregister().
900 + */
902 +/*
903 + * lock on driver table
904 + * we track its state as spin_is_locked does not do anything on non-SMP boxes
905 + */
906 +static spinlock_t      crypto_drivers_lock;
907 +static int                     crypto_drivers_locked;          /* for non-SMP boxes */
909 +#define        CRYPTO_DRIVER_LOCK() \
910 +                       ({ \
911 +                               spin_lock_irqsave(&crypto_drivers_lock, d_flags); \
912 +                               crypto_drivers_locked = 1; \
913 +                               dprintk("%s,%d: DRIVER_LOCK()\n", __FILE__, __LINE__); \
914 +                        })
915 +#define        CRYPTO_DRIVER_UNLOCK() \
916 +                       ({ \
917 +                               dprintk("%s,%d: DRIVER_UNLOCK()\n", __FILE__, __LINE__); \
918 +                               crypto_drivers_locked = 0; \
919 +                               spin_unlock_irqrestore(&crypto_drivers_lock, d_flags); \
920 +                        })
921 +#define        CRYPTO_DRIVER_ASSERT() \
922 +                       ({ \
923 +                               if (!crypto_drivers_locked) { \
924 +                                       dprintk("%s,%d: DRIVER_ASSERT!\n", __FILE__, __LINE__); \
925 +                               } \
926 +                        })
928 +/*
929 + * Crypto device/driver capabilities structure.
930 + *
931 + * Synchronization:
932 + * (d) - protected by CRYPTO_DRIVER_LOCK()
933 + * (q) - protected by CRYPTO_Q_LOCK()
934 + * Not tagged fields are read-only.
935 + */
936 +struct cryptocap {
937 +       device_t        cc_dev;                 /* (d) device/driver */
938 +       u_int32_t       cc_sessions;            /* (d) # of sessions */
939 +       u_int32_t       cc_koperations;         /* (d) # of asym operations */
940 +       /*
941 +        * Largest possible operator length (in bits) for each type of
942 +        * encryption algorithm. XXX not used
943 +        */
944 +       u_int16_t       cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
945 +       u_int8_t        cc_alg[CRYPTO_ALGORITHM_MAX + 1];
946 +       u_int8_t        cc_kalg[CRK_ALGORITHM_MAX + 1];
948 +       int             cc_flags;               /* (d) flags */
949 +#define CRYPTOCAP_F_CLEANUP    0x80000000      /* needs resource cleanup */
950 +       int             cc_qblocked;            /* (q) symmetric q blocked */
951 +       int             cc_kqblocked;           /* (q) asymmetric q blocked */
953 +       int             cc_unqblocked;          /* (q) symmetric q blocked */
954 +       int             cc_unkqblocked;         /* (q) asymmetric q blocked */
955 +};
956 +static struct cryptocap *crypto_drivers = NULL;
957 +static int crypto_drivers_num = 0;
959 +/*
960 + * There are two queues for crypto requests; one for symmetric (e.g.
961 + * cipher) operations and one for asymmetric (e.g. MOD) operations.
962 + * A single mutex is used to lock access to both queues.  We could
963 + * have one per-queue but having one simplifies handling of block/unblock
964 + * operations.
965 + */
966 +static int crp_sleep = 0;
967 +static LIST_HEAD(crp_q);               /* request queues */
968 +static LIST_HEAD(crp_kq);
970 +static spinlock_t crypto_q_lock;
972 +int crypto_all_qblocked = 0;  /* protect with Q_LOCK */
973 +module_param(crypto_all_qblocked, int, 0444);
974 +MODULE_PARM_DESC(crypto_all_qblocked, "Are all crypto queues blocked");
976 +int crypto_all_kqblocked = 0; /* protect with Q_LOCK */
977 +module_param(crypto_all_kqblocked, int, 0444);
978 +MODULE_PARM_DESC(crypto_all_kqblocked, "Are all asym crypto queues blocked");
980 +#define        CRYPTO_Q_LOCK() \
981 +                       ({ \
982 +                               spin_lock_irqsave(&crypto_q_lock, q_flags); \
983 +                               dprintk("%s,%d: Q_LOCK()\n", __FILE__, __LINE__); \
984 +                        })
985 +#define        CRYPTO_Q_UNLOCK() \
986 +                       ({ \
987 +                               dprintk("%s,%d: Q_UNLOCK()\n", __FILE__, __LINE__); \
988 +                               spin_unlock_irqrestore(&crypto_q_lock, q_flags); \
989 +                        })
991 +/*
992 + * There are two queues for processing completed crypto requests; one
993 + * for the symmetric and one for the asymmetric ops.  We only need one
994 + * but have two to avoid type futzing (cryptop vs. cryptkop).  A single
995 + * mutex is used to lock access to both queues.  Note that this lock
996 + * must be separate from the lock on request queues to insure driver
997 + * callbacks don't generate lock order reversals.
998 + */
999 +static LIST_HEAD(crp_ret_q);           /* callback queues */
1000 +static LIST_HEAD(crp_ret_kq);
1002 +static spinlock_t crypto_ret_q_lock;
1003 +#define        CRYPTO_RETQ_LOCK() \
1004 +                       ({ \
1005 +                               spin_lock_irqsave(&crypto_ret_q_lock, r_flags); \
1006 +                               dprintk("%s,%d: RETQ_LOCK\n", __FILE__, __LINE__); \
1007 +                        })
1008 +#define        CRYPTO_RETQ_UNLOCK() \
1009 +                       ({ \
1010 +                               dprintk("%s,%d: RETQ_UNLOCK\n", __FILE__, __LINE__); \
1011 +                               spin_unlock_irqrestore(&crypto_ret_q_lock, r_flags); \
1012 +                        })
1013 +#define        CRYPTO_RETQ_EMPTY()     (list_empty(&crp_ret_q) && list_empty(&crp_ret_kq))
1015 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
1016 +static kmem_cache_t *cryptop_zone;
1017 +static kmem_cache_t *cryptodesc_zone;
1018 +#else
1019 +static struct kmem_cache *cryptop_zone;
1020 +static struct kmem_cache *cryptodesc_zone;
1021 +#endif
1023 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
1024 +#include <linux/sched.h>
1025 +#define        kill_proc(p,s,v)        send_sig(s,find_task_by_vpid(p),0)
1026 +#endif
1028 +#define debug crypto_debug
1029 +int crypto_debug = 0;
1030 +module_param(crypto_debug, int, 0644);
1031 +MODULE_PARM_DESC(crypto_debug, "Enable debug");
1032 +EXPORT_SYMBOL(crypto_debug);
1034 +/*
1035 + * Maximum number of outstanding crypto requests before we start
1036 + * failing requests.  We need this to prevent DOS when too many
1037 + * requests are arriving for us to keep up.  Otherwise we will
1038 + * run the system out of memory.  Since crypto is slow,  we are
1039 + * usually the bottleneck that needs to say, enough is enough.
1040 + *
1041 + * We cannot print errors when this condition occurs,  we are already too
1042 + * slow,  printing anything will just kill us
1043 + */
1045 +static int crypto_q_cnt = 0;
1046 +module_param(crypto_q_cnt, int, 0444);
1047 +MODULE_PARM_DESC(crypto_q_cnt,
1048 +               "Current number of outstanding crypto requests");
1050 +static int crypto_q_max = 1000;
1051 +module_param(crypto_q_max, int, 0644);
1052 +MODULE_PARM_DESC(crypto_q_max,
1053 +               "Maximum number of outstanding crypto requests");
1055 +#define bootverbose crypto_verbose
1056 +static int crypto_verbose = 0;
1057 +module_param(crypto_verbose, int, 0644);
1058 +MODULE_PARM_DESC(crypto_verbose,
1059 +               "Enable verbose crypto startup");
1061 +int    crypto_usercrypto = 1;  /* userland may do crypto reqs */
1062 +module_param(crypto_usercrypto, int, 0644);
1063 +MODULE_PARM_DESC(crypto_usercrypto,
1064 +          "Enable/disable user-mode access to crypto support");
1066 +int    crypto_userasymcrypto = 1;      /* userland may do asym crypto reqs */
1067 +module_param(crypto_userasymcrypto, int, 0644);
1068 +MODULE_PARM_DESC(crypto_userasymcrypto,
1069 +          "Enable/disable user-mode access to asymmetric crypto support");
1071 +int    crypto_devallowsoft = 0;        /* only use hardware crypto */
1072 +module_param(crypto_devallowsoft, int, 0644);
1073 +MODULE_PARM_DESC(crypto_devallowsoft,
1074 +          "Enable/disable use of software crypto support");
1076 +/*
1077 + * This parameter controls the maximum number of crypto operations to 
1078 + * do consecutively in the crypto kernel thread before scheduling to allow 
1079 + * other processes to run. Without it, it is possible to get into a 
1080 + * situation where the crypto thread never allows any other processes to run.
1081 + * Default to 1000 which should be less than one second.
1082 + */
1083 +static int crypto_max_loopcount = 1000;
1084 +module_param(crypto_max_loopcount, int, 0644);
1085 +MODULE_PARM_DESC(crypto_max_loopcount,
1086 +          "Maximum number of crypto ops to do before yielding to other processes");
1088 +static pid_t   cryptoproc = (pid_t) -1;
1089 +static struct  completion cryptoproc_exited;
1090 +static DECLARE_WAIT_QUEUE_HEAD(cryptoproc_wait);
1091 +static pid_t   cryptoretproc = (pid_t) -1;
1092 +static struct  completion cryptoretproc_exited;
1093 +static DECLARE_WAIT_QUEUE_HEAD(cryptoretproc_wait);
1095 +static int crypto_proc(void *arg);
1096 +static int crypto_ret_proc(void *arg);
1097 +static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
1098 +static int crypto_kinvoke(struct cryptkop *krp, int flags);
1099 +static void crypto_exit(void);
1100 +static  int crypto_init(void);
1102 +static struct cryptostats cryptostats;
1104 +static struct cryptocap *
1105 +crypto_checkdriver(u_int32_t hid)
1106 +{
1107 +       if (crypto_drivers == NULL)
1108 +               return NULL;
1109 +       return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
1110 +}
1112 +/*
1113 + * Compare a driver's list of supported algorithms against another
1114 + * list; return non-zero if all algorithms are supported.
1115 + */
1116 +static int
1117 +driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
1118 +{
1119 +       const struct cryptoini *cr;
1121 +       /* See if all the algorithms are supported. */
1122 +       for (cr = cri; cr; cr = cr->cri_next)
1123 +               if (cap->cc_alg[cr->cri_alg] == 0)
1124 +                       return 0;
1125 +       return 1;
1126 +}
1128 +/*
1129 + * Select a driver for a new session that supports the specified
1130 + * algorithms and, optionally, is constrained according to the flags.
1131 + * The algorithm we use here is pretty stupid; just use the
1132 + * first driver that supports all the algorithms we need. If there
1133 + * are multiple drivers we choose the driver with the fewest active
1134 + * sessions.  We prefer hardware-backed drivers to software ones.
1135 + *
1136 + * XXX We need more smarts here (in real life too, but that's
1137 + * XXX another story altogether).
1138 + */
1139 +static struct cryptocap *
1140 +crypto_select_driver(const struct cryptoini *cri, int flags)
1141 +{
1142 +       struct cryptocap *cap, *best;
1143 +       int match, hid;
1145 +       CRYPTO_DRIVER_ASSERT();
1147 +       /*
1148 +        * Look first for hardware crypto devices if permitted.
1149 +        */
1150 +       if (flags & CRYPTOCAP_F_HARDWARE)
1151 +               match = CRYPTOCAP_F_HARDWARE;
1152 +       else
1153 +               match = CRYPTOCAP_F_SOFTWARE;
1154 +       best = NULL;
1155 +again:
1156 +       for (hid = 0; hid < crypto_drivers_num; hid++) {
1157 +               cap = &crypto_drivers[hid];
1158 +               /*
1159 +                * If it's not initialized, is in the process of
1160 +                * going away, or is not appropriate (hardware
1161 +                * or software based on match), then skip.
1162 +                */
1163 +               if (cap->cc_dev == NULL ||
1164 +                   (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
1165 +                   (cap->cc_flags & match) == 0)
1166 +                       continue;
1168 +               /* verify all the algorithms are supported. */
1169 +               if (driver_suitable(cap, cri)) {
1170 +                       if (best == NULL ||
1171 +                           cap->cc_sessions < best->cc_sessions)
1172 +                               best = cap;
1173 +               }
1174 +       }
1175 +       if (best != NULL)
1176 +               return best;
1177 +       if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
1178 +               /* sort of an Algol 68-style for loop */
1179 +               match = CRYPTOCAP_F_SOFTWARE;
1180 +               goto again;
1181 +       }
1182 +       return best;
1183 +}
1185 +/*
1186 + * Create a new session.  The crid argument specifies a crypto
1187 + * driver to use or constraints on a driver to select (hardware
1188 + * only, software only, either).  Whatever driver is selected
1189 + * must be capable of the requested crypto algorithms.
1190 + */
1191 +int
1192 +crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
1193 +{
1194 +       struct cryptocap *cap;
1195 +       u_int32_t hid, lid;
1196 +       int err;
1197 +       unsigned long d_flags;
1199 +       CRYPTO_DRIVER_LOCK();
1200 +       if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
1201 +               /*
1202 +                * Use specified driver; verify it is capable.
1203 +                */
1204 +               cap = crypto_checkdriver(crid);
1205 +               if (cap != NULL && !driver_suitable(cap, cri))
1206 +                       cap = NULL;
1207 +       } else {
1208 +               /*
1209 +                * No requested driver; select based on crid flags.
1210 +                */
1211 +               cap = crypto_select_driver(cri, crid);
1212 +               /*
1213 +                * if NULL then can't do everything in one session.
1214 +                * XXX Fix this. We need to inject a "virtual" session
1215 +                * XXX layer right about here.
1216 +                */
1217 +       }
1218 +       if (cap != NULL) {
1219 +               /* Call the driver initialization routine. */
1220 +               hid = cap - crypto_drivers;
1221 +               lid = hid;              /* Pass the driver ID. */
1222 +               cap->cc_sessions++;
1223 +               CRYPTO_DRIVER_UNLOCK();
1224 +               err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
1225 +               CRYPTO_DRIVER_LOCK();
1226 +               if (err == 0) {
1227 +                       (*sid) = (cap->cc_flags & 0xff000000)
1228 +                              | (hid & 0x00ffffff);
1229 +                       (*sid) <<= 32;
1230 +                       (*sid) |= (lid & 0xffffffff);
1231 +               } else
1232 +                       cap->cc_sessions--;
1233 +       } else
1234 +               err = EINVAL;
1235 +       CRYPTO_DRIVER_UNLOCK();
1236 +       return err;
1237 +}
1239 +static void
1240 +crypto_remove(struct cryptocap *cap)
1241 +{
1242 +       CRYPTO_DRIVER_ASSERT();
1243 +       if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
1244 +               bzero(cap, sizeof(*cap));
1245 +}
1247 +/*
1248 + * Delete an existing session (or a reserved session on an unregistered
1249 + * driver).
1250 + */
1251 +int
1252 +crypto_freesession(u_int64_t sid)
1253 +{
1254 +       struct cryptocap *cap;
1255 +       u_int32_t hid;
1256 +       int err = 0;
1257 +       unsigned long d_flags;
1259 +       dprintk("%s()\n", __FUNCTION__);
1260 +       CRYPTO_DRIVER_LOCK();
1262 +       if (crypto_drivers == NULL) {
1263 +               err = EINVAL;
1264 +               goto done;
1265 +       }
1267 +       /* Determine two IDs. */
1268 +       hid = CRYPTO_SESID2HID(sid);
1270 +       if (hid >= crypto_drivers_num) {
1271 +               dprintk("%s - INVALID DRIVER NUM %d\n", __FUNCTION__, hid);
1272 +               err = ENOENT;
1273 +               goto done;
1274 +       }
1275 +       cap = &crypto_drivers[hid];
1277 +       if (cap->cc_dev) {
1278 +               CRYPTO_DRIVER_UNLOCK();
1279 +               /* Call the driver cleanup routine, if available, unlocked. */
1280 +               err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);
1281 +               CRYPTO_DRIVER_LOCK();
1282 +       }
1284 +       if (cap->cc_sessions)
1285 +               cap->cc_sessions--;
1287 +       if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
1288 +               crypto_remove(cap);
1290 +done:
1291 +       CRYPTO_DRIVER_UNLOCK();
1292 +       return err;
1293 +}
1295 +/*
1296 + * Return an unused driver id.  Used by drivers prior to registering
1297 + * support for the algorithms they handle.
1298 + */
1299 +int32_t
1300 +crypto_get_driverid(device_t dev, int flags)
1301 +{
1302 +       struct cryptocap *newdrv;
1303 +       int i;
1304 +       unsigned long d_flags;
1306 +       if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
1307 +               printf("%s: no flags specified when registering driver\n",
1308 +                   device_get_nameunit(dev));
1309 +               return -1;
1310 +       }
1312 +       CRYPTO_DRIVER_LOCK();
1314 +       for (i = 0; i < crypto_drivers_num; i++) {
1315 +               if (crypto_drivers[i].cc_dev == NULL &&
1316 +                   (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
1317 +                       break;
1318 +               }
1319 +       }
1321 +       /* Out of entries, allocate some more. */
1322 +       if (i == crypto_drivers_num) {
1323 +               /* Be careful about wrap-around. */
1324 +               if (2 * crypto_drivers_num <= crypto_drivers_num) {
1325 +                       CRYPTO_DRIVER_UNLOCK();
1326 +                       printk("crypto: driver count wraparound!\n");
1327 +                       return -1;
1328 +               }
1330 +               newdrv = kmalloc(2 * crypto_drivers_num * sizeof(struct cryptocap),
1331 +                               GFP_KERNEL);
1332 +               if (newdrv == NULL) {
1333 +                       CRYPTO_DRIVER_UNLOCK();
1334 +                       printk("crypto: no space to expand driver table!\n");
1335 +                       return -1;
1336 +               }
1338 +               memcpy(newdrv, crypto_drivers,
1339 +                               crypto_drivers_num * sizeof(struct cryptocap));
1340 +               memset(&newdrv[crypto_drivers_num], 0,
1341 +                               crypto_drivers_num * sizeof(struct cryptocap));
1343 +               crypto_drivers_num *= 2;
1345 +               kfree(crypto_drivers);
1346 +               crypto_drivers = newdrv;
1347 +       }
1349 +       /* NB: state is zero'd on free */
1350 +       crypto_drivers[i].cc_sessions = 1;      /* Mark */
1351 +       crypto_drivers[i].cc_dev = dev;
1352 +       crypto_drivers[i].cc_flags = flags;
1353 +       if (bootverbose)
1354 +               printf("crypto: assign %s driver id %u, flags %u\n",
1355 +                   device_get_nameunit(dev), i, flags);
1357 +       CRYPTO_DRIVER_UNLOCK();
1359 +       return i;
1360 +}
1362 +/*
1363 + * Lookup a driver by name.  We match against the full device
1364 + * name and unit, and against just the name.  The latter gives
1365 + * us a simple wildcarding by device name.  On success return the
1366 + * driver/hardware identifier; otherwise return -1.
1367 + */
1368 +int
1369 +crypto_find_driver(const char *match)
1370 +{
1371 +       int i, len = strlen(match);
1372 +       unsigned long d_flags;
1374 +       CRYPTO_DRIVER_LOCK();
1375 +       for (i = 0; i < crypto_drivers_num; i++) {
1376 +               device_t dev = crypto_drivers[i].cc_dev;
1377 +               if (dev == NULL ||
1378 +                   (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
1379 +                       continue;
1380 +               if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
1381 +                   strncmp(match, device_get_name(dev), len) == 0)
1382 +                       break;
1383 +       }
1384 +       CRYPTO_DRIVER_UNLOCK();
1385 +       return i < crypto_drivers_num ? i : -1;
1386 +}
1388 +/*
1389 + * Return the device_t for the specified driver or NULL
1390 + * if the driver identifier is invalid.
1391 + */
1392 +device_t
1393 +crypto_find_device_byhid(int hid)
1394 +{
1395 +       struct cryptocap *cap = crypto_checkdriver(hid);
1396 +       return cap != NULL ? cap->cc_dev : NULL;
1397 +}
1399 +/*
1400 + * Return the device/driver capabilities.
1401 + */
1402 +int
1403 +crypto_getcaps(int hid)
1404 +{
1405 +       struct cryptocap *cap = crypto_checkdriver(hid);
1406 +       return cap != NULL ? cap->cc_flags : 0;
1407 +}
1409 +/*
1410 + * Register support for a key-related algorithm.  This routine
1411 + * is called once for each algorithm supported by a driver.
1412 + */
1413 +int
1414 +crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
1415 +{
1416 +       struct cryptocap *cap;
1417 +       int err;
1418 +       unsigned long d_flags;
1420 +       dprintk("%s()\n", __FUNCTION__);
1421 +       CRYPTO_DRIVER_LOCK();
1423 +       cap = crypto_checkdriver(driverid);
1424 +       if (cap != NULL &&
1425 +           (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
1426 +               /*
1427 +                * XXX Do some performance testing to determine placing.
1428 +                * XXX We probably need an auxiliary data structure that
1429 +                * XXX describes relative performances.
1430 +                */
1432 +               cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
1433 +               if (bootverbose)
1434 +                       printf("crypto: %s registers key alg %u flags %u\n"
1435 +                               , device_get_nameunit(cap->cc_dev)
1436 +                               , kalg
1437 +                               , flags
1438 +                       );
1439 +               err = 0;
1440 +       } else
1441 +               err = EINVAL;
1443 +       CRYPTO_DRIVER_UNLOCK();
1444 +       return err;
1445 +}
1447 +/*
1448 + * Register support for a non-key-related algorithm.  This routine
1449 + * is called once for each such algorithm supported by a driver.
1450 + */
1451 +int
1452 +crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
1453 +    u_int32_t flags)
1454 +{
1455 +       struct cryptocap *cap;
1456 +       int err;
1457 +       unsigned long d_flags;
1459 +       dprintk("%s(id=0x%x, alg=%d, maxoplen=%d, flags=0x%x)\n", __FUNCTION__,
1460 +                       driverid, alg, maxoplen, flags);
1462 +       CRYPTO_DRIVER_LOCK();
1464 +       cap = crypto_checkdriver(driverid);
1465 +       /* NB: algorithms are in the range [1..max] */
1466 +       if (cap != NULL &&
1467 +           (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
1468 +               /*
1469 +                * XXX Do some performance testing to determine placing.
1470 +                * XXX We probably need an auxiliary data structure that
1471 +                * XXX describes relative performances.
1472 +                */
1474 +               cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
1475 +               cap->cc_max_op_len[alg] = maxoplen;
1476 +               if (bootverbose)
1477 +                       printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
1478 +                               , device_get_nameunit(cap->cc_dev)
1479 +                               , alg
1480 +                               , flags
1481 +                               , maxoplen
1482 +                       );
1483 +               cap->cc_sessions = 0;           /* Unmark */
1484 +               err = 0;
1485 +       } else
1486 +               err = EINVAL;
1488 +       CRYPTO_DRIVER_UNLOCK();
1489 +       return err;
1490 +}
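Putting the registration side together, a driver probe under this framework has the following shape. This is a hedged sketch only: my_softc, my_crypto_probe and the chosen algorithms are placeholders, and crypto_get_driverid() is assumed to have its usual OCF signature (device_t plus capability flags).

struct my_softc {
	device_t	sc_dev;
	int32_t		sc_cid;		/* driver id from crypto_get_driverid() */
};

static int my_crypto_probe(struct my_softc *sc)
{
	/* Obtain a driver id, declaring ourselves software backed. */
	sc->sc_cid = crypto_get_driverid(sc->sc_dev, CRYPTOCAP_F_SOFTWARE);
	if (sc->sc_cid < 0)
		return ENODEV;	/* positive errno, as elsewhere in this file */

	/* Advertise the symmetric algorithms we can process. */
	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
	return 0;
}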
1492 +static void
1493 +driver_finis(struct cryptocap *cap)
1494 +{
1495 +       u_int32_t ses, kops;
1497 +       CRYPTO_DRIVER_ASSERT();
1499 +       ses = cap->cc_sessions;
1500 +       kops = cap->cc_koperations;
1501 +       bzero(cap, sizeof(*cap));
1502 +       if (ses != 0 || kops != 0) {
1503 +               /*
1504 +                * If there are pending sessions,
1505 +                * just mark as invalid.
1506 +                */
1507 +               cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
1508 +               cap->cc_sessions = ses;
1509 +               cap->cc_koperations = kops;
1510 +       }
1511 +}
1513 +/*
1514 + * Unregister a crypto driver. If there are pending sessions using it,
1515 + * leave enough information around so that subsequent calls using those
1516 + * sessions will correctly detect the driver has been unregistered and
1517 + * reroute requests.
1518 + */
1519 +int
1520 +crypto_unregister(u_int32_t driverid, int alg)
1521 +{
1522 +       struct cryptocap *cap;
1523 +       int i, err;
1524 +       unsigned long d_flags;
1526 +       dprintk("%s()\n", __FUNCTION__);
1527 +       CRYPTO_DRIVER_LOCK();
1529 +       cap = crypto_checkdriver(driverid);
1530 +       if (cap != NULL &&
1531 +           (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
1532 +           cap->cc_alg[alg] != 0) {
1533 +               cap->cc_alg[alg] = 0;
1534 +               cap->cc_max_op_len[alg] = 0;
1536 +               /* Was this the last algorithm? */
1537 +               for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
1538 +                       if (cap->cc_alg[i] != 0)
1539 +                               break;
1541 +               if (i == CRYPTO_ALGORITHM_MAX + 1)
1542 +                       driver_finis(cap);
1543 +               err = 0;
1544 +       } else
1545 +               err = EINVAL;
1546 +       CRYPTO_DRIVER_UNLOCK();
1547 +       return err;
1548 +}
1550 +/*
1551 + * Unregister all algorithms associated with a crypto driver.
1552 + * If there are pending sessions using it, leave enough information
1553 + * around so that subsequent calls using those sessions will
1554 + * correctly detect the driver has been unregistered and reroute
1555 + * requests.
1556 + */
1557 +int
1558 +crypto_unregister_all(u_int32_t driverid)
1559 +{
1560 +       struct cryptocap *cap;
1561 +       int err;
1562 +       unsigned long d_flags;
1564 +       dprintk("%s()\n", __FUNCTION__);
1565 +       CRYPTO_DRIVER_LOCK();
1566 +       cap = crypto_checkdriver(driverid);
1567 +       if (cap != NULL) {
1568 +               driver_finis(cap);
1569 +               err = 0;
1570 +       } else
1571 +               err = EINVAL;
1572 +       CRYPTO_DRIVER_UNLOCK();
1574 +       return err;
1575 +}
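Teardown for the hypothetical driver sketched earlier is a single call; sessions still in flight are handled by the CRYPTOCAP_F_CLEANUP path in driver_finis() above rather than freed immediately.

static void my_crypto_detach(struct my_softc *sc)
{
	/* Drops every algorithm we registered; slots with pending
	 * sessions stay around, marked for cleanup. */
	crypto_unregister_all(sc->sc_cid);
}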
1577 +/*
1578 + * Clear blockage on a driver.  The what parameter indicates whether
1579 + * the driver is now ready for cryptop's and/or cryptokop's.
1580 + */
1581 +int
1582 +crypto_unblock(u_int32_t driverid, int what)
1583 +{
1584 +       struct cryptocap *cap;
1585 +       int err;
1586 +       unsigned long q_flags;
1588 +       CRYPTO_Q_LOCK();
1589 +       cap = crypto_checkdriver(driverid);
1590 +       if (cap != NULL) {
1591 +               if (what & CRYPTO_SYMQ) {
1592 +                       cap->cc_qblocked = 0;
1593 +                       cap->cc_unqblocked = 0;
1594 +                       crypto_all_qblocked = 0;
1595 +               }
1596 +               if (what & CRYPTO_ASYMQ) {
1597 +                       cap->cc_kqblocked = 0;
1598 +                       cap->cc_unkqblocked = 0;
1599 +                       crypto_all_kqblocked = 0;
1600 +               }
1601 +               if (crp_sleep)
1602 +                       wake_up_interruptible(&cryptoproc_wait);
1603 +               err = 0;
1604 +       } else
1605 +               err = EINVAL;
1606 +       CRYPTO_Q_UNLOCK(); //DAVIDM should this be a driver lock
1608 +       return err;
1609 +}
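crypto_unblock() is the other half of the ERESTART protocol: a driver that pushed back because its queues or rings were full calls it once resources free up, typically from its completion path. A sketch, reusing the hypothetical my_softc from above:

static void my_crypto_ring_drained(struct my_softc *sc)
{
	/* We previously returned ERESTART from our process method;
	 * tell the core that both request queues may be retried. */
	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
}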
1611 +/*
1612 + * Add a crypto request to a queue, to be processed by the kernel thread.
1613 + */
1614 +int
1615 +crypto_dispatch(struct cryptop *crp)
1616 +{
1617 +       struct cryptocap *cap;
1618 +       int result = -1;
1619 +       unsigned long q_flags;
1621 +       dprintk("%s()\n", __FUNCTION__);
1623 +       cryptostats.cs_ops++;
1625 +       CRYPTO_Q_LOCK();
1626 +       if (crypto_q_cnt >= crypto_q_max) {
1627 +               CRYPTO_Q_UNLOCK();
1628 +               cryptostats.cs_drops++;
1629 +               return ENOMEM;
1630 +       }
1631 +       crypto_q_cnt++;
1633 +       /* make sure we are starting a fresh run on this crp. */
1634 +       crp->crp_flags &= ~CRYPTO_F_DONE;
1635 +       crp->crp_etype = 0;
1637 +       /*
1638 +        * Caller marked the request to be processed immediately; dispatch
1639 +        * it directly to the driver unless the driver is currently blocked.
1640 +        */
1641 +       if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
1642 +               int hid = CRYPTO_SESID2HID(crp->crp_sid);
1643 +               cap = crypto_checkdriver(hid);
1644 +               /* Driver cannot disappear when there is an active session. */
1645 +               KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
1646 +               if (!cap->cc_qblocked) {
1647 +                       crypto_all_qblocked = 0;
1648 +                       crypto_drivers[hid].cc_unqblocked = 1;
1649 +                       CRYPTO_Q_UNLOCK();
1650 +                       result = crypto_invoke(cap, crp, 0);
1651 +                       CRYPTO_Q_LOCK();
1652 +                       if (result == ERESTART)
1653 +                               if (crypto_drivers[hid].cc_unqblocked)
1654 +                                       crypto_drivers[hid].cc_qblocked = 1;
1655 +                       crypto_drivers[hid].cc_unqblocked = 0;
1656 +               }
1657 +       }
1658 +       if (result == ERESTART) {
1659 +               /*
1660 +                * The driver ran out of resources, mark the
1661 +                * driver ``blocked'' for cryptop's and put
1662 +                * the request back in the queue.  It would be
1663 +                * best to put the request back where we got
1664 +                * it but that's hard so for now we put it
1665 +                * at the front.  This should be ok; putting
1666 +                * it at the end does not work.
1667 +                */
1668 +               list_add(&crp->crp_next, &crp_q);
1669 +               cryptostats.cs_blocks++;
1670 +               result = 0;
1671 +       } else if (result == -1) {
1672 +               TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
1673 +               result = 0;
1674 +       }
1675 +       if (crp_sleep)
1676 +               wake_up_interruptible(&cryptoproc_wait);
1677 +       CRYPTO_Q_UNLOCK();
1678 +       return result;
1679 +}
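From the submitter's point of view the return value matters: 0 means the request was either handed to the driver or queued for the crypto thread, ENOMEM means the crypto_q_max limit was hit and the caller still owns the request, and other driver errors are passed straight through. A small sketch of that handling; submit_one() is a hypothetical wrapper and the request itself is built as shown after crypto_getreq() below.

static int submit_one(struct cryptop *crp)
{
	int error = crypto_dispatch(crp);

	if (error == ENOMEM) {
		/* Queue full: the core did not take the request over. */
		crypto_freereq(crp);
	}
	/* On success, completion is reported through crp->crp_callback. */
	return error;
}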
1681 +/*
1682 + * Add an asymmetric crypto request to a queue,
1683 + * to be processed by the kernel thread.
1684 + */
1685 +int
1686 +crypto_kdispatch(struct cryptkop *krp)
1687 +{
1688 +       int error;
1689 +       unsigned long q_flags;
1691 +       cryptostats.cs_kops++;
1693 +       error = crypto_kinvoke(krp, krp->krp_crid);
1694 +       if (error == ERESTART) {
1695 +               CRYPTO_Q_LOCK();
1696 +               TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
1697 +               if (crp_sleep)
1698 +                       wake_up_interruptible(&cryptoproc_wait);
1699 +               CRYPTO_Q_UNLOCK();
1700 +               error = 0;
1701 +       }
1702 +       return error;
1703 +}
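A consumer-side sketch for key operations, with the same caveats: the cryptkop is typically allocated by the caller, the operation and crid constraints are filled in, and the result is read from krp_status in the callback. The big-number parameters (krp_iparams, krp_oparams and their buffers) are elided because their layout is defined in cryptodev.h, not in this file; the function names are hypothetical.

static int my_kop_done(struct cryptkop *krp)
{
	if (krp->krp_status != 0)
		printk("crypto: mod-exp failed, error %d\n", krp->krp_status);
	kfree(krp);
	return 0;
}

static int my_submit_modexp(void)
{
	struct cryptkop *krp = kmalloc(sizeof(*krp), GFP_KERNEL);

	if (krp == NULL)
		return ENOMEM;
	memset(krp, 0, sizeof(*krp));
	krp->krp_op = CRK_MOD_EXP;
	/* Let the core pick any suitable driver, hardware preferred. */
	krp->krp_crid = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
	krp->krp_callback = my_kop_done;
	/* ... krp_iparams/krp_oparams and parameter buffers go here ... */
	return crypto_kdispatch(krp);
}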
1705 +/*
1706 + * Verify a driver is suitable for the specified operation.
1707 + */
1708 +static __inline int
1709 +kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
1710 +{
1711 +       return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
1712 +}
1714 +/*
1715 + * Select a driver for an asym operation.  The driver must
1716 + * support the necessary algorithm.  The caller can constrain
1717 + * which device is selected with the flags parameter.  The
1718 + * selection here is simple: scan the drivers that support the
1719 + * algorithm we need and, if there are
1720 + * multiple suitable drivers, choose the one with the
1721 + * fewest active operations.  We prefer hardware-backed
1722 + * drivers to software ones when either may be used.
1723 + */
1724 +static struct cryptocap *
1725 +crypto_select_kdriver(const struct cryptkop *krp, int flags)
1726 +{
1727 +       struct cryptocap *cap, *best, *blocked;
1728 +       int match, hid;
1730 +       CRYPTO_DRIVER_ASSERT();
1732 +       /*
1733 +        * Look first for hardware crypto devices if permitted.
1734 +        */
1735 +       if (flags & CRYPTOCAP_F_HARDWARE)
1736 +               match = CRYPTOCAP_F_HARDWARE;
1737 +       else
1738 +               match = CRYPTOCAP_F_SOFTWARE;
1739 +       best = NULL;
1740 +       blocked = NULL;
1741 +again:
1742 +       for (hid = 0; hid < crypto_drivers_num; hid++) {
1743 +               cap = &crypto_drivers[hid];
1744 +               /*
1745 +                * If it's not initialized, is in the process of
1746 +                * going away, or is not appropriate (hardware
1747 +                * or software based on match), then skip.
1748 +                */
1749 +               if (cap->cc_dev == NULL ||
1750 +                   (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
1751 +                   (cap->cc_flags & match) == 0)
1752 +                       continue;
1754 +               /* verify all the algorithms are supported. */
1755 +               if (kdriver_suitable(cap, krp)) {
1756 +                       if (best == NULL ||
1757 +                           cap->cc_koperations < best->cc_koperations)
1758 +                               best = cap;
1759 +               }
1760 +       }
1761 +       if (best != NULL)
1762 +               return best;
1763 +       if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
1764 +               /* sort of an Algol 68-style for loop */
1765 +               match = CRYPTOCAP_F_SOFTWARE;
1766 +               goto again;
1767 +       }
1768 +       return best;
1769 +}
1771 +/*
1772 + * Dispatch an asymmetric crypto request.
1773 + */
1774 +static int
1775 +crypto_kinvoke(struct cryptkop *krp, int crid)
1776 +{
1777 +       struct cryptocap *cap = NULL;
1778 +       int error;
1779 +       unsigned long d_flags;
1781 +       KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
1782 +       KASSERT(krp->krp_callback != NULL,
1783 +           ("%s: krp->krp_callback == NULL", __func__));
1785 +       CRYPTO_DRIVER_LOCK();
1786 +       if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
1787 +               cap = crypto_checkdriver(crid);
1788 +               if (cap != NULL) {
1789 +                       /*
1790 +                        * Driver present, it must support the necessary
1791 +                        * algorithm and, if s/w drivers are excluded,
1792 +                        * it must be registered as hardware-backed.
1793 +                        */
1794 +                       if (!kdriver_suitable(cap, krp) ||
1795 +                           (!crypto_devallowsoft &&
1796 +                            (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
1797 +                               cap = NULL;
1798 +               }
1799 +       } else {
1800 +               /*
1801 +                * No requested driver; select based on crid flags.
1802 +                */
1803 +               if (!crypto_devallowsoft)       /* NB: disallow s/w drivers */
1804 +                       crid &= ~CRYPTOCAP_F_SOFTWARE;
1805 +               cap = crypto_select_kdriver(krp, crid);
1806 +       }
1807 +       if (cap != NULL && !cap->cc_kqblocked) {
1808 +               krp->krp_hid = cap - crypto_drivers;
1809 +               cap->cc_koperations++;
1810 +               CRYPTO_DRIVER_UNLOCK();
1811 +               error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
1812 +               CRYPTO_DRIVER_LOCK();
1813 +               if (error == ERESTART) {
1814 +                       cap->cc_koperations--;
1815 +                       CRYPTO_DRIVER_UNLOCK();
1816 +                       return (error);
1817 +               }
1818 +               /* return the actual device used */
1819 +               krp->krp_crid = krp->krp_hid;
1820 +       } else {
1821 +               /*
1822 +                * NB: cap is !NULL if device is blocked; in
1823 +                *     that case return ERESTART so the operation
1824 +                *     is resubmitted if possible.
1825 +                */
1826 +               error = (cap == NULL) ? ENODEV : ERESTART;
1827 +       }
1828 +       CRYPTO_DRIVER_UNLOCK();
1830 +       if (error) {
1831 +               krp->krp_status = error;
1832 +               crypto_kdone(krp);
1833 +       }
1834 +       return 0;
1835 +}
1838 +/*
1839 + * Dispatch a crypto request to the appropriate crypto devices.
1840 + */
1841 +static int
1842 +crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
1843 +{
1844 +       KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
1845 +       KASSERT(crp->crp_callback != NULL,
1846 +           ("%s: crp->crp_callback == NULL", __func__));
1847 +       KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
1849 +       dprintk("%s()\n", __FUNCTION__);
1851 +#ifdef CRYPTO_TIMING
1852 +       if (crypto_timing)
1853 +               crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
1854 +#endif
1855 +       if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
1856 +               struct cryptodesc *crd;
1857 +               u_int64_t nid;
1859 +               /*
1860 +                * Driver has unregistered; migrate the session and return
1861 +                * an error to the caller so they'll resubmit the op.
1862 +                *
1863 +                * XXX: What if there are more already queued requests for this
1864 +                *      session?
1865 +                */
1866 +               crypto_freesession(crp->crp_sid);
1868 +               for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
1869 +                       crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
1871 +               /* XXX propagate flags from initial session? */
1872 +               if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
1873 +                   CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
1874 +                       crp->crp_sid = nid;
1876 +               crp->crp_etype = EAGAIN;
1877 +               crypto_done(crp);
1878 +               return 0;
1879 +       } else {
1880 +               /*
1881 +                * Invoke the driver to process the request.
1882 +                */
1883 +               return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
1884 +       }
1885 +}
1887 +/*
1888 + * Release a set of crypto descriptors.
1889 + */
1890 +void
1891 +crypto_freereq(struct cryptop *crp)
1892 +{
1893 +       struct cryptodesc *crd;
1895 +       if (crp == NULL)
1896 +               return;
1898 +#ifdef DIAGNOSTIC
1899 +       {
1900 +               struct cryptop *crp2;
1901 +               unsigned long q_flags;
1903 +               CRYPTO_Q_LOCK();
1904 +               TAILQ_FOREACH(crp2, &crp_q, crp_next) {
1905 +                       KASSERT(crp2 != crp,
1906 +                           ("Freeing cryptop from the crypto queue (%p).",
1907 +                           crp));
1908 +               }
1909 +               CRYPTO_Q_UNLOCK();
1910 +               CRYPTO_RETQ_LOCK();
1911 +               TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
1912 +                       KASSERT(crp2 != crp,
1913 +                           ("Freeing cryptop from the return queue (%p).",
1914 +                           crp));
1915 +               }
1916 +               CRYPTO_RETQ_UNLOCK();
1917 +       }
1918 +#endif
1920 +       while ((crd = crp->crp_desc) != NULL) {
1921 +               crp->crp_desc = crd->crd_next;
1922 +               kmem_cache_free(cryptodesc_zone, crd);
1923 +       }
1924 +       kmem_cache_free(cryptop_zone, crp);
1925 +}
1927 +/*
1928 + * Acquire a set of crypto descriptors.
1929 + */
1930 +struct cryptop *
1931 +crypto_getreq(int num)
1932 +{
1933 +       struct cryptodesc *crd;
1934 +       struct cryptop *crp;
1936 +       crp = kmem_cache_alloc(cryptop_zone, SLAB_ATOMIC);
1937 +       if (crp != NULL) {
1938 +               memset(crp, 0, sizeof(*crp));
1939 +               INIT_LIST_HEAD(&crp->crp_next);
1940 +               init_waitqueue_head(&crp->crp_waitq);
1941 +               while (num--) {
1942 +                       crd = kmem_cache_alloc(cryptodesc_zone, SLAB_ATOMIC);
1943 +                       if (crd == NULL) {
1944 +                               crypto_freereq(crp);
1945 +                               return NULL;
1946 +                       }
1947 +                       memset(crd, 0, sizeof(*crd));
1948 +                       crd->crd_next = crp->crp_desc;
1949 +                       crp->crp_desc = crd;
1950 +               }
1951 +       }
1952 +       return crp;
1953 +}
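Allocation and submission tie together as follows. The sketch builds a one-descriptor encryption request against an already established session; note that crd_alg, crd_skip, crd_len, crd_inject, crd_flags and crp_buf come from cryptodev.h rather than this file, so the field list is an assumption, and error handling is trimmed.

static int my_encrypt(u_int64_t sid, char *buf, int len,
    int (*cb)(struct cryptop *))
{
	struct cryptop *crp = crypto_getreq(1);		/* one descriptor */
	struct cryptodesc *crd;

	if (crp == NULL)
		return ENOMEM;

	crd = crp->crp_desc;
	crd->crd_alg = CRYPTO_AES_CBC;
	crd->crd_skip = 0;
	crd->crd_len = len;
	crd->crd_inject = 0;
	crd->crd_flags = CRD_F_ENCRYPT;

	crp->crp_sid = sid;			/* from crypto_newsession() */
	crp->crp_buf = buf;
	crp->crp_ilen = len;
	crp->crp_flags = CRYPTO_F_CBIMM;	/* run cb directly on completion */
	crp->crp_callback = cb;

	return crypto_dispatch(crp);
}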
1955 +/*
1956 + * Invoke the callback on behalf of the driver.
1957 + */
1958 +void
1959 +crypto_done(struct cryptop *crp)
1960 +{
1961 +       unsigned long q_flags;
1963 +       dprintk("%s()\n", __FUNCTION__);
1964 +       if ((crp->crp_flags & CRYPTO_F_DONE) == 0) {
1965 +               crp->crp_flags |= CRYPTO_F_DONE;
1966 +               CRYPTO_Q_LOCK();
1967 +               crypto_q_cnt--;
1968 +               CRYPTO_Q_UNLOCK();
1969 +       } else
1970 +               printk("crypto: crypto_done op already done, flags 0x%x",
1971 +                               crp->crp_flags);
1972 +       if (crp->crp_etype != 0)
1973 +               cryptostats.cs_errs++;
1974 +       /*
1975 +        * CBIMM means unconditionally do the callback immediately;
1976 +        * CBIFSYNC means do the callback immediately only if the
1977 +        * operation was done synchronously.  Both are used to avoid
1978 +        * doing extraneous context switches; the latter is mostly
1979 +        * used with the software crypto driver.
1980 +        */
1981 +       if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
1982 +           ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
1983 +            (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
1984 +               /*
1985 +                * Do the callback directly.  This is ok when the
1986 +                * callback routine does very little (e.g. the
1987 +                * /dev/crypto callback method just does a wakeup).
1988 +                */
1989 +               crp->crp_callback(crp);
1990 +       } else {
1991 +               unsigned long r_flags;
1992 +               /*
1993 +                * Normal case; queue the callback for the thread.
1994 +                */
1995 +               CRYPTO_RETQ_LOCK();
1996 +               if (CRYPTO_RETQ_EMPTY())
1997 +                       wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
1998 +               TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
1999 +               CRYPTO_RETQ_UNLOCK();
2000 +       }
2001 +}
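On the driver side, completion is the mirror image: the process method invoked through CRYPTODEV_PROCESS finishes the work, records any error in crp_etype and hands the request back with crypto_done(); key operations do the same through krp_status and crypto_kdone(). A hypothetical fragment:

static void my_request_finished(struct cryptop *crp, int error)
{
	crp->crp_etype = error;	/* 0 on success, e.g. EINVAL on failure */
	crypto_done(crp);	/* callback runs directly or via crypto_ret_proc */
}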
2003 +/*
2004 + * Invoke the callback on behalf of the driver.
2005 + */
2006 +void
2007 +crypto_kdone(struct cryptkop *krp)
2008 +{
2009 +       struct cryptocap *cap;
2010 +       unsigned long d_flags;
2012 +       if ((krp->krp_flags & CRYPTO_KF_DONE) != 0)
2013 +               printk("crypto: crypto_kdone op already done, flags 0x%x",
2014 +                               krp->krp_flags);
2015 +       krp->krp_flags |= CRYPTO_KF_DONE;
2016 +       if (krp->krp_status != 0)
2017 +               cryptostats.cs_kerrs++;
2019 +       CRYPTO_DRIVER_LOCK();
2020 +       /* XXX: What if driver is loaded in the meantime? */
2021 +       if (krp->krp_hid < crypto_drivers_num) {
2022 +               cap = &crypto_drivers[krp->krp_hid];
2023 +               cap->cc_koperations--;
2024 +               KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
2025 +               if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
2026 +                       crypto_remove(cap);
2027 +       }
2028 +       CRYPTO_DRIVER_UNLOCK();
2030 +       /*
2031 +        * CBIMM means unconditionally do the callback immediately;
2032 + * this is used to avoid doing extraneous context switches.
2033 +        */
2034 +       if ((krp->krp_flags & CRYPTO_KF_CBIMM)) {
2035 +               /*
2036 +                * Do the callback directly.  This is ok when the
2037 +                * callback routine does very little (e.g. the
2038 +                * /dev/crypto callback method just does a wakeup).
2039 +                */
2040 +               krp->krp_callback(krp);
2041 +       } else {
2042 +               unsigned long r_flags;
2043 +               /*
2044 +                * Normal case; queue the callback for the thread.
2045 +                */
2046 +               CRYPTO_RETQ_LOCK();
2047 +               if (CRYPTO_RETQ_EMPTY())
2048 +                       wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
2049 +               TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
2050 +               CRYPTO_RETQ_UNLOCK();
2051 +       }
2052 +}
2054 +int
2055 +crypto_getfeat(int *featp)
2056 +{
2057 +       int hid, kalg, feat = 0;
2058 +       unsigned long d_flags;
2060 +       CRYPTO_DRIVER_LOCK();
2061 +       for (hid = 0; hid < crypto_drivers_num; hid++) {
2062 +               const struct cryptocap *cap = &crypto_drivers[hid];
2064 +               if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
2065 +                   !crypto_devallowsoft) {
2066 +                       continue;
2067 +               }
2068 +               for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
2069 +                       if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
2070 +                               feat |=  1 << kalg;
2071 +       }
2072 +       CRYPTO_DRIVER_UNLOCK();
2073 +       *featp = feat;
2074 +       return (0);
2075 +}
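crypto_getfeat() condenses the key-algorithm tables into a bitmask with bit kalg set when some usable driver supports that CRK_* algorithm; this is what the asymmetric feature ioctl in cryptodev.c ultimately reports to userland. Checking for a specific capability is then a one-liner, sketched here:

static int have_mod_exp(void)
{
	int feat = 0;

	crypto_getfeat(&feat);
	return (feat & (1 << CRK_MOD_EXP)) != 0;
}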
2077 +/*
2078 + * Crypto thread, dispatches crypto requests.
2079 + */
2080 +static int
2081 +crypto_proc(void *arg)
2082 +{
2083 +       struct cryptop *crp, *submit;
2084 +       struct cryptkop *krp, *krpp;
2085 +       struct cryptocap *cap;
2086 +       u_int32_t hid;
2087 +       int result, hint;
2088 +       unsigned long q_flags;
2089 +       int loopcount = 0;
2091 +       ocf_daemonize("crypto");
2093 +       CRYPTO_Q_LOCK();
2094 +       for (;;) {
2095 +               /*
2096 +                * We need to make sure we don't get into a busy loop with nothing
2097 +                * to do; the two crypto_all_*blocked vars help us find out when
2098 +                * we are all full and can do nothing on any driver or queue.  If
2099 +                * so we wait for an unblock.
2100 +                */
2101 +               crypto_all_qblocked  = !list_empty(&crp_q);
2103 +               /*
2104 +                * Find the first element in the queue that can be
2105 +                * processed and look-ahead to see if multiple ops
2106 +                * are ready for the same driver.
2107 +                */
2108 +               submit = NULL;
2109 +               hint = 0;
2110 +               list_for_each_entry(crp, &crp_q, crp_next) {
2111 +                       hid = CRYPTO_SESID2HID(crp->crp_sid);
2112 +                       cap = crypto_checkdriver(hid);
2113 +                       /*
2114 +                        * Driver cannot disappear when there is an active
2115 +                        * session.
2116 +                        */
2117 +                       KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
2118 +                           __func__, __LINE__));
2119 +                       if (cap == NULL || cap->cc_dev == NULL) {
2120 +                               /* Op needs to be migrated, process it. */
2121 +                               if (submit == NULL)
2122 +                                       submit = crp;
2123 +                               break;
2124 +                       }
2125 +                       if (!cap->cc_qblocked) {
2126 +                               if (submit != NULL) {
2127 +                                       /*
2128 +                                        * We stop on finding another op,
2129 +                                        * regardless of whether it's for the same
2130 +                                        * driver or not.  We could keep
2131 +                                        * searching the queue but it might be
2132 +                                        * better to just use a per-driver
2133 +                                        * queue instead.
2134 +                                        */
2135 +                                       if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
2136 +                                               hint = CRYPTO_HINT_MORE;
2137 +                                       break;
2138 +                               } else {
2139 +                                       submit = crp;
2140 +                                       if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
2141 +                                               break;
2142 +                                       /* keep scanning in case more are q'd */
2143 +                               }
2144 +                       }
2145 +               }
2146 +               if (submit != NULL) {
2147 +                       hid = CRYPTO_SESID2HID(submit->crp_sid);
2148 +                       crypto_all_qblocked = 0;
2149 +                       list_del(&submit->crp_next);
2150 +                       crypto_drivers[hid].cc_unqblocked = 1;
2151 +                       cap = crypto_checkdriver(hid);
2152 +                       CRYPTO_Q_UNLOCK();
2153 +                       KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
2154 +                           __func__, __LINE__));
2155 +                       result = crypto_invoke(cap, submit, hint);
2156 +                       CRYPTO_Q_LOCK();
2157 +                       if (result == ERESTART) {
2158 +                               /*
2159 +                                * The driver ran out of resources, mark the
2160 +                                * driver ``blocked'' for cryptop's and put
2161 +                                * the request back in the queue.  It would be
2162 +                                * best to put the request back where we got
2163 +                                * it but that's hard so for now we put it
2164 +                                * at the front.  This should be ok; putting
2165 +                                * it at the end does not work.
2166 +                                */
2167 +                               /* XXX validate sid again? */
2168 +                               list_add(&submit->crp_next, &crp_q);
2169 +                               cryptostats.cs_blocks++;
2170 +                               if (crypto_drivers[hid].cc_unqblocked)
2171 +                                       crypto_drivers[hid].cc_qblocked=0;
2172 +                               crypto_drivers[hid].cc_unqblocked=0;
2173 +                       }
2174 +                       crypto_drivers[hid].cc_unqblocked = 0;
2175 +               }
2177 +               crypto_all_kqblocked = !list_empty(&crp_kq);
2179 +               /* As above, but for key ops */
2180 +               krp = NULL;
2181 +               list_for_each_entry(krpp, &crp_kq, krp_next) {
2182 +                       cap = crypto_checkdriver(krpp->krp_hid);
2183 +                       if (cap == NULL || cap->cc_dev == NULL) {
2184 +                               /*
2185 +                                * Operation needs to be migrated, invalidate
2186 +                                * the assigned device so it will reselect a
2187 +                                * new one below.  Propagate the original
2188 +                                * crid selection flags if supplied.
2189 +                                */
2190 +                               krpp->krp_hid = krpp->krp_crid &
2191 +                                   (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
2192 +                               if (krpp->krp_hid == 0)
2193 +                                       krpp->krp_hid =
2194 +                                   CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
2195 +                               break;
2196 +                       }
2197 +                       if (!cap->cc_kqblocked) {
2198 +                               krp = krpp;
2199 +                               break;
2200 +                       }
2201 +               }
2202 +               if (krp != NULL) {
2203 +                       crypto_all_kqblocked = 0;
2204 +                       list_del(&krp->krp_next);
2205 +                       crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
2206 +                       CRYPTO_Q_UNLOCK();
2207 +                       result = crypto_kinvoke(krp, krp->krp_hid);
2208 +                       CRYPTO_Q_LOCK();
2209 +                       if (result == ERESTART) {
2210 +                               /*
2211 +                                * The driver ran out of resources, mark the
2212 +                                * driver ``blocked'' for cryptkop's and put
2213 +                                * the request back in the queue.  It would be
2214 +                                * best to put the request back where we got
2215 +                                * it but that's hard so for now we put it
2216 +                                * at the front.  This should be ok; putting
2217 +                                * it at the end does not work.
2218 +                                */
2219 +                               /* XXX validate sid again? */
2220 +                               list_add(&krp->krp_next, &crp_kq);
2221 +                               cryptostats.cs_kblocks++;
2222 +                       } else
2223 +                               crypto_drivers[krp->krp_hid].cc_kqblocked = 0;
2224 +               }
2226 +               if (submit == NULL && krp == NULL) {
2227 +                       /*
2228 +                        * Nothing more to be processed.  Sleep until we're
2229 +                        * woken because there are more ops to process.
2230 +                        * This happens either by submission or by a driver
2231 +                        * becoming unblocked and notifying us through
2232 +                        * crypto_unblock.  Note that when we wakeup we
2233 +                        * start processing each queue again from the
2234 +                        * front. It's not clear that it's important to
2235 +                        * preserve this ordering since ops may finish
2236 +                        * out of order if dispatched to different devices
2237 +                        * and some become blocked while others do not.
2238 +                        */
2239 +                       dprintk("%s - sleeping (qe=%d qb=%d kqe=%d kqb=%d)\n",
2240 +                                       __FUNCTION__,
2241 +                                       list_empty(&crp_q), crypto_all_qblocked,
2242 +                                       list_empty(&crp_kq), crypto_all_kqblocked);
2243 +                       loopcount = 0;
2244 +                       CRYPTO_Q_UNLOCK();
2245 +                       crp_sleep = 1;
2246 +                       wait_event_interruptible(cryptoproc_wait,
2247 +                                       !(list_empty(&crp_q) || crypto_all_qblocked) ||
2248 +                                       !(list_empty(&crp_kq) || crypto_all_kqblocked) ||
2249 +                                       cryptoproc == (pid_t) -1);
2250 +                       crp_sleep = 0;
2251 +                       if (signal_pending (current)) {
2252 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
2253 +                               spin_lock_irq(&current->sigmask_lock);
2254 +#endif
2255 +                               flush_signals(current);
2256 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
2257 +                               spin_unlock_irq(&current->sigmask_lock);
2258 +#endif
2259 +                       }
2260 +                       CRYPTO_Q_LOCK();
2261 +                       dprintk("%s - awake\n", __FUNCTION__);
2262 +                       if (cryptoproc == (pid_t) -1)
2263 +                               break;
2264 +                       cryptostats.cs_intrs++;
2265 +               } else if (loopcount > crypto_max_loopcount) {
2266 +                       /*
2267 +                        * Give other processes a chance to run if we've 
2268 +                        * been using the CPU exclusively for a while.
2269 +                        */
2270 +                       loopcount = 0;
2271 +                       schedule();
2272 +               }
2273 +               loopcount++;
2274 +       }
2275 +       CRYPTO_Q_UNLOCK();
2276 +       complete_and_exit(&cryptoproc_exited, 0);
2277 +}
2279 +/*
2281 + * Crypto return thread; does callbacks for processed crypto requests.
2281 + * Callbacks are done here, rather than in the crypto drivers, because
2282 + * callbacks typically are expensive and would slow interrupt handling.
2283 + */
2284 +static int
2285 +crypto_ret_proc(void *arg)
2286 +{
2287 +       struct cryptop *crpt;
2288 +       struct cryptkop *krpt;
2289 +       unsigned long  r_flags;
2291 +       ocf_daemonize("crypto_ret");
2293 +       CRYPTO_RETQ_LOCK();
2294 +       for (;;) {
2295 +               /* Harvest return q's for completed ops */
2296 +               crpt = NULL;
2297 +               if (!list_empty(&crp_ret_q))
2298 +                       crpt = list_entry(crp_ret_q.next, typeof(*crpt), crp_next);
2299 +               if (crpt != NULL)
2300 +                       list_del(&crpt->crp_next);
2302 +               krpt = NULL;
2303 +               if (!list_empty(&crp_ret_kq))
2304 +                       krpt = list_entry(crp_ret_kq.next, typeof(*krpt), krp_next);
2305 +               if (krpt != NULL)
2306 +                       list_del(&krpt->krp_next);
2308 +               if (crpt != NULL || krpt != NULL) {
2309 +                       CRYPTO_RETQ_UNLOCK();
2310 +                       /*
2311 +                        * Run callbacks unlocked.
2312 +                        */
2313 +                       if (crpt != NULL)
2314 +                               crpt->crp_callback(crpt);
2315 +                       if (krpt != NULL)
2316 +                               krpt->krp_callback(krpt);
2317 +                       CRYPTO_RETQ_LOCK();
2318 +               } else {
2319 +                       /*
2320 +                        * Nothing more to be processed.  Sleep until we're
2321 +                        * woken because there are more returns to process.
2322 +                        */
2323 +                       dprintk("%s - sleeping\n", __FUNCTION__);
2324 +                       CRYPTO_RETQ_UNLOCK();
2325 +                       wait_event_interruptible(cryptoretproc_wait,
2326 +                                       cryptoretproc == (pid_t) -1 ||
2327 +                                       !list_empty(&crp_ret_q) ||
2328 +                                       !list_empty(&crp_ret_kq));
2329 +                       if (signal_pending (current)) {
2330 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
2331 +                               spin_lock_irq(&current->sigmask_lock);
2332 +#endif
2333 +                               flush_signals(current);
2334 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
2335 +                               spin_unlock_irq(&current->sigmask_lock);
2336 +#endif
2337 +                       }
2338 +                       CRYPTO_RETQ_LOCK();
2339 +                       dprintk("%s - awake\n", __FUNCTION__);
2340 +                       if (cryptoretproc == (pid_t) -1) {
2341 +                               dprintk("%s - EXITING!\n", __FUNCTION__);
2342 +                               break;
2343 +                       }
2344 +                       cryptostats.cs_rets++;
2345 +               }
2346 +       }
2347 +       CRYPTO_RETQ_UNLOCK();
2348 +       complete_and_exit(&cryptoretproc_exited, 0);
2349 +}
2352 +#if 0 /* should put this into /proc or something */
2353 +static void
2354 +db_show_drivers(void)
2355 +{
2356 +       int hid;
2358 +       db_printf("%12s %4s %4s %8s %2s %2s\n"
2359 +               , "Device"
2360 +               , "Ses"
2361 +               , "Kops"
2362 +               , "Flags"
2363 +               , "QB"
2364 +               , "KB"
2365 +       );
2366 +       for (hid = 0; hid < crypto_drivers_num; hid++) {
2367 +               const struct cryptocap *cap = &crypto_drivers[hid];
2368 +               if (cap->cc_dev == NULL)
2369 +                       continue;
2370 +               db_printf("%-12s %4u %4u %08x %2u %2u\n"
2371 +                   , device_get_nameunit(cap->cc_dev)
2372 +                   , cap->cc_sessions
2373 +                   , cap->cc_koperations
2374 +                   , cap->cc_flags
2375 +                   , cap->cc_qblocked
2376 +                   , cap->cc_kqblocked
2377 +               );
2378 +       }
2379 +}
2381 +DB_SHOW_COMMAND(crypto, db_show_crypto)
2382 +{
2383 +       struct cryptop *crp;
2385 +       db_show_drivers();
2386 +       db_printf("\n");
2388 +       db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
2389 +           "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
2390 +           "Desc", "Callback");
2391 +       TAILQ_FOREACH(crp, &crp_q, crp_next) {
2392 +               db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
2393 +                   , (int) CRYPTO_SESID2HID(crp->crp_sid)
2394 +                   , (int) CRYPTO_SESID2CAPS(crp->crp_sid)
2395 +                   , crp->crp_ilen, crp->crp_olen
2396 +                   , crp->crp_etype
2397 +                   , crp->crp_flags
2398 +                   , crp->crp_desc
2399 +                   , crp->crp_callback
2400 +               );
2401 +       }
2402 +       if (!TAILQ_EMPTY(&crp_ret_q)) {
2403 +               db_printf("\n%4s %4s %4s %8s\n",
2404 +                   "HID", "Etype", "Flags", "Callback");
2405 +               TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
2406 +                       db_printf("%4u %4u %04x %8p\n"
2407 +                           , (int) CRYPTO_SESID2HID(crp->crp_sid)
2408 +                           , crp->crp_etype
2409 +                           , crp->crp_flags
2410 +                           , crp->crp_callback
2411 +                       );
2412 +               }
2413 +       }
2414 +}
2416 +DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
2417 +{
2418 +       struct cryptkop *krp;
2420 +       db_show_drivers();
2421 +       db_printf("\n");
2423 +       db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
2424 +           "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
2425 +       TAILQ_FOREACH(krp, &crp_kq, krp_next) {
2426 +               db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
2427 +                   , krp->krp_op
2428 +                   , krp->krp_status
2429 +                   , krp->krp_iparams, krp->krp_oparams
2430 +                   , krp->krp_crid, krp->krp_hid
2431 +                   , krp->krp_callback
2432 +               );
2433 +       }
2434 +       if (!TAILQ_EMPTY(&crp_ret_kq)) {
2435 +               db_printf("%4s %5s %8s %4s %8s\n",
2436 +                   "Op", "Status", "CRID", "HID", "Callback");
2437 +               TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
2438 +                       db_printf("%4u %5u %08x %4u %8p\n"
2439 +                           , krp->krp_op
2440 +                           , krp->krp_status
2441 +                           , krp->krp_crid, krp->krp_hid
2442 +                           , krp->krp_callback
2443 +                       );
2444 +               }
2445 +       }
2446 +}
2447 +#endif
2450 +static int
2451 +crypto_init(void)
2452 +{
2453 +       int error;
2455 +       dprintk("%s(%p)\n", __FUNCTION__, (void *) crypto_init);
2457 +       if (crypto_initted)
2458 +               return 0;
2459 +       crypto_initted = 1;
2461 +       spin_lock_init(&crypto_drivers_lock);
2462 +       spin_lock_init(&crypto_q_lock);
2463 +       spin_lock_init(&crypto_ret_q_lock);
2465 +       cryptop_zone = kmem_cache_create("cryptop", sizeof(struct cryptop),
2466 +                                      0, SLAB_HWCACHE_ALIGN, NULL
2467 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
2468 +                                      , NULL
2469 +#endif
2470 +                                       );
2472 +       cryptodesc_zone = kmem_cache_create("cryptodesc", sizeof(struct cryptodesc),
2473 +                                      0, SLAB_HWCACHE_ALIGN, NULL
2474 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
2475 +                                      , NULL
2476 +#endif
2477 +                                       );
2479 +       if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
2480 +               printk("crypto: crypto_init cannot setup crypto zones\n");
2481 +               error = ENOMEM;
2482 +               goto bad;
2483 +       }
2485 +       crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
2486 +       crypto_drivers = kmalloc(crypto_drivers_num * sizeof(struct cryptocap),
2487 +                       GFP_KERNEL);
2488 +       if (crypto_drivers == NULL) {
2489 +               printk("crypto: crypto_init cannot setup crypto drivers\n");
2490 +               error = ENOMEM;
2491 +               goto bad;
2492 +       }
2494 +       memset(crypto_drivers, 0, crypto_drivers_num * sizeof(struct cryptocap));
2496 +       init_completion(&cryptoproc_exited);
2497 +       init_completion(&cryptoretproc_exited);
2499 +       cryptoproc = 0; /* to avoid race condition where proc runs first */
2500 +       cryptoproc = kernel_thread(crypto_proc, NULL, CLONE_FS|CLONE_FILES);
2501 +       if (cryptoproc < 0) {
2502 +               error = cryptoproc;
2503 +               printk("crypto: crypto_init cannot start crypto thread; error %d",
2504 +                       error);
2505 +               goto bad;
2506 +       }
2508 +       cryptoretproc = 0; /* to avoid race condition where proc runs first */
2509 +       cryptoretproc = kernel_thread(crypto_ret_proc, NULL, CLONE_FS|CLONE_FILES);
2510 +       if (cryptoretproc < 0) {
2511 +               error = cryptoretproc;
2512 +               printk("crypto: crypto_init cannot start cryptoret thread; error %d",
2513 +                               error);
2514 +               goto bad;
2515 +       }
2517 +       return 0;
2518 +bad:
2519 +       crypto_exit();
2520 +       return error;
2521 +}
2524 +static void
2525 +crypto_exit(void)
2526 +{
2527 +       pid_t p;
2528 +       unsigned long d_flags;
2530 +       dprintk("%s()\n", __FUNCTION__);
2532 +       /*
2533 +        * Terminate any crypto threads.
2534 +        */
2536 +       CRYPTO_DRIVER_LOCK();
2537 +       p = cryptoproc;
2538 +       cryptoproc = (pid_t) -1;
2539 +       kill_proc(p, SIGTERM, 1);
2540 +       wake_up_interruptible(&cryptoproc_wait);
2541 +       CRYPTO_DRIVER_UNLOCK();
2543 +       wait_for_completion(&cryptoproc_exited);
2545 +       CRYPTO_DRIVER_LOCK();
2546 +       p = cryptoretproc;
2547 +       cryptoretproc = (pid_t) -1;
2548 +       kill_proc(p, SIGTERM, 1);
2549 +       wake_up_interruptible(&cryptoretproc_wait);
2550 +       CRYPTO_DRIVER_UNLOCK();
2552 +       wait_for_completion(&cryptoretproc_exited);
2554 +       /* XXX flush queues??? */
2556 +       /* 
2557 +        * Reclaim dynamically allocated resources.
2558 +        */
2559 +       if (crypto_drivers != NULL)
2560 +               kfree(crypto_drivers);
2562 +       if (cryptodesc_zone != NULL)
2563 +               kmem_cache_destroy(cryptodesc_zone);
2564 +       if (cryptop_zone != NULL)
2565 +               kmem_cache_destroy(cryptop_zone);
2566 +}
2569 +EXPORT_SYMBOL(crypto_newsession);
2570 +EXPORT_SYMBOL(crypto_freesession);
2571 +EXPORT_SYMBOL(crypto_get_driverid);
2572 +EXPORT_SYMBOL(crypto_kregister);
2573 +EXPORT_SYMBOL(crypto_register);
2574 +EXPORT_SYMBOL(crypto_unregister);
2575 +EXPORT_SYMBOL(crypto_unregister_all);
2576 +EXPORT_SYMBOL(crypto_unblock);