diff -Nur linux-2.6.36.orig/crypto/Kconfig linux-2.6.36/crypto/Kconfig --- linux-2.6.36.orig/crypto/Kconfig 2010-10-20 22:30:22.000000000 +0200 +++ linux-2.6.36/crypto/Kconfig 2010-11-09 20:28:04.004996902 +0100 @@ -845,3 +845,6 @@ source "drivers/crypto/Kconfig" endif # if CRYPTO + +source "crypto/ocf/Kconfig" + diff -Nur linux-2.6.36.orig/crypto/Makefile linux-2.6.36/crypto/Makefile --- linux-2.6.36.orig/crypto/Makefile 2010-10-20 22:30:22.000000000 +0200 +++ linux-2.6.36/crypto/Makefile 2010-11-09 20:28:04.014995662 +0100 @@ -86,6 +86,8 @@ obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o +obj-$(CONFIG_OCF_OCF) += ocf/ + # # generic algorithms and the async_tx api # diff -Nur linux-2.6.36.orig/crypto/ocf/c7108/aes-7108.c linux-2.6.36/crypto/ocf/c7108/aes-7108.c --- linux-2.6.36.orig/crypto/ocf/c7108/aes-7108.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.6.36/crypto/ocf/c7108/aes-7108.c 2010-11-09 20:28:04.061247304 +0100 @@ -0,0 +1,839 @@ +/* + * Copyright (C) 2006 Micronas USA + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Effort sponsored in part by the Defense Advanced Research Projects + * Agency (DARPA) and Air Force Research Laboratory, Air Force + * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
+ *
+ */
+
+//#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/crypto.h>
+#include <linux/mm.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+//#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <cryptodev.h>
+#include <uio.h>
+#include <aes-7108.h>
+
+/* Runtime mode */
+static int c7108_crypto_mode = C7108_AES_CTRL_MODE_CTR;
+//static int c7108_crypto_mode = C7108_AES_CTRL_MODE_CBC;
+
+static int32_t c7108_id = -1;
+static struct cipher_7108 **c7108_sessions = NULL;
+static u_int32_t c7108_sesnum = 0;
+static unsigned long iobar;
+
+/* Crypto entry points */
+static int c7108_process(void *, struct cryptop *, int);
+static int c7108_newsession(void *, u_int32_t *, struct cryptoini *);
+static int c7108_freesession(void *, u_int64_t);
+
+/* Globals */
+static int debug = 0;
+static spinlock_t csr_mutex;
+
+/* Generic controller-based lock */
+#define AES_LOCK() \
+		spin_lock(&csr_mutex)
+#define AES_UNLOCK() \
+		spin_unlock(&csr_mutex)
+
+/* 7108 AES register access */
+#define c7108_reg_wr8(a,d)  iowrite8(d, (void*)(iobar+(a)))
+#define c7108_reg_wr16(a,d) iowrite16(d, (void*)(iobar+(a)))
+#define c7108_reg_wr32(a,d) iowrite32(d, (void*)(iobar+(a)))
+#define c7108_reg_rd8(a)    ioread8((void*)(iobar+(a)))
+#define c7108_reg_rd16(a)   ioread16((void*)(iobar+(a)))
+#define c7108_reg_rd32(a)   ioread32((void*)(iobar+(a)))
+
+static int
+c7108_xlate_key(int klen, u8* k8ptr, u32* k32ptr)
+{
+	int i, nw=0;
+	nw = ((klen >= 256) ? 8 : (klen >= 192) ? 6 : 4);
+	for (i = 0; i < nw; i++) {
+		/* Pack four consecutive key bytes into one little-endian
+		 * 32-bit word per iteration; each word consumes its own
+		 * group of four bytes. */
+		k32ptr[i] = (k8ptr[4*i+3] << 24) | (k8ptr[4*i+2] << 16) |
+			    (k8ptr[4*i+1] << 8)  |  k8ptr[4*i];
+	}
+	return 0;
+}
+
+static int
+c7108_cache_key(int klen, u32* k32ptr, u8* k8ptr)
+{
+	int i, nb=0;
+	u8* ptr = (u8*)k32ptr;
+	nb = ((klen >= 256) ? 32 : (klen >= 192) ?
24 : 16); + for ( i = 0; i < nb; i++) + k8ptr[i] = ptr[i]; + return 0; +} + +static int +c7108_aes_setup_dma(u32 src, u32 dst, u32 len) +{ + if (len < 16) { + printk("len < 16\n"); + return -10; + } + if (len % 16) { + printk("len not multiple of 16\n"); + return -11; + } + c7108_reg_wr16(C7108_AES_DMA_SRC0_LO, (u16) src); + c7108_reg_wr16(C7108_AES_DMA_SRC0_HI, (u16)((src & 0xffff0000) >> 16)); + c7108_reg_wr16(C7108_AES_DMA_DST0_LO, (u16) dst); + c7108_reg_wr16(C7108_AES_DMA_DST0_HI, (u16)((dst & 0xffff0000) >> 16)); + c7108_reg_wr16(C7108_AES_DMA_LEN, (u16) ((len / 16) - 1)); + + return 0; +} + +static int +c7108_aes_set_hw_iv(u8 iv[16]) +{ + c7108_reg_wr16(C7108_AES_IV0_LO, (u16) ((iv[1] << 8) | iv[0])); + c7108_reg_wr16(C7108_AES_IV0_HI, (u16) ((iv[3] << 8) | iv[2])); + c7108_reg_wr16(C7108_AES_IV1_LO, (u16) ((iv[5] << 8) | iv[4])); + c7108_reg_wr16(C7108_AES_IV1_HI, (u16) ((iv[7] << 8) | iv[6])); + c7108_reg_wr16(C7108_AES_IV2_LO, (u16) ((iv[9] << 8) | iv[8])); + c7108_reg_wr16(C7108_AES_IV2_HI, (u16) ((iv[11] << 8) | iv[10])); + c7108_reg_wr16(C7108_AES_IV3_LO, (u16) ((iv[13] << 8) | iv[12])); + c7108_reg_wr16(C7108_AES_IV3_HI, (u16) ((iv[15] << 8) | iv[14])); + + return 0; +} + +static void +c7108_aes_read_dkey(u32 * dkey) +{ + dkey[0] = (c7108_reg_rd16(C7108_AES_EKEY0_HI) << 16) | + c7108_reg_rd16(C7108_AES_EKEY0_LO); + dkey[1] = (c7108_reg_rd16(C7108_AES_EKEY1_HI) << 16) | + c7108_reg_rd16(C7108_AES_EKEY1_LO); + dkey[2] = (c7108_reg_rd16(C7108_AES_EKEY2_HI) << 16) | + c7108_reg_rd16(C7108_AES_EKEY2_LO); + dkey[3] = (c7108_reg_rd16(C7108_AES_EKEY3_HI) << 16) | + c7108_reg_rd16(C7108_AES_EKEY3_LO); + dkey[4] = (c7108_reg_rd16(C7108_AES_EKEY4_HI) << 16) | + c7108_reg_rd16(C7108_AES_EKEY4_LO); + dkey[5] = (c7108_reg_rd16(C7108_AES_EKEY5_HI) << 16) | + c7108_reg_rd16(C7108_AES_EKEY5_LO); + dkey[6] = (c7108_reg_rd16(C7108_AES_EKEY6_HI) << 16) | + c7108_reg_rd16(C7108_AES_EKEY6_LO); + dkey[7] = (c7108_reg_rd16(C7108_AES_EKEY7_HI) << 16) | + c7108_reg_rd16(C7108_AES_EKEY7_LO); +} + +static int +c7108_aes_cipher(int op, + u32 dst, + u32 src, + u32 len, + int klen, + u16 mode, + u32 key[8], + u8 iv[16]) +{ + int rv = 0, cnt=0; + u16 ctrl = 0, stat = 0; + + AES_LOCK(); + + /* Setup key length */ + if (klen == 128) { + ctrl |= C7108_AES_KEY_LEN_128; + } else if (klen == 192) { + ctrl |= C7108_AES_KEY_LEN_192; + } else if (klen == 256) { + ctrl |= C7108_AES_KEY_LEN_256; + } else { + AES_UNLOCK(); + return -3; + } + + /* Check opcode */ + if (C7108_AES_ENCRYPT == op) { + ctrl |= C7108_AES_ENCRYPT; + } else if (C7108_AES_DECRYPT == op) { + ctrl |= C7108_AES_DECRYPT; + } else { + AES_UNLOCK(); + return -4; + } + + /* check mode */ + if ( (mode != C7108_AES_CTRL_MODE_CBC) && + (mode != C7108_AES_CTRL_MODE_CFB) && + (mode != C7108_AES_CTRL_MODE_OFB) && + (mode != C7108_AES_CTRL_MODE_CTR) && + (mode != C7108_AES_CTRL_MODE_ECB) ) { + AES_UNLOCK(); + return -5; + } + + /* Now set mode */ + ctrl |= mode; + + /* For CFB, OFB, and CTR, neither backward key + * expansion nor key inversion is required. 
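+ * For CBC and ECB decryption, however, the engine expects the key
+ * words pre-loaded in the swapped order programmed below, and the
+ * expanded key it derives is afterwards read back (see
+ * c7108_aes_read_dkey()) so it can be cached for later requests.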
+ */ + if ( (C7108_AES_DECRYPT == op) && + (C7108_AES_CTRL_MODE_CBC == mode || + C7108_AES_CTRL_MODE_ECB == mode ) ){ + + /* Program Key */ + c7108_reg_wr16(C7108_AES_KEY0_LO, (u16) key[4]); + c7108_reg_wr16(C7108_AES_KEY0_HI, (u16) (key[4] >> 16)); + c7108_reg_wr16(C7108_AES_KEY1_LO, (u16) key[5]); + c7108_reg_wr16(C7108_AES_KEY1_HI, (u16) (key[5] >> 16)); + c7108_reg_wr16(C7108_AES_KEY2_LO, (u16) key[6]); + c7108_reg_wr16(C7108_AES_KEY2_HI, (u16) (key[6] >> 16)); + c7108_reg_wr16(C7108_AES_KEY3_LO, (u16) key[7]); + c7108_reg_wr16(C7108_AES_KEY3_HI, (u16) (key[7] >> 16)); + c7108_reg_wr16(C7108_AES_KEY6_LO, (u16) key[2]); + c7108_reg_wr16(C7108_AES_KEY6_HI, (u16) (key[2] >> 16)); + c7108_reg_wr16(C7108_AES_KEY7_LO, (u16) key[3]); + c7108_reg_wr16(C7108_AES_KEY7_HI, (u16) (key[3] >> 16)); + + + if (192 == klen) { + c7108_reg_wr16(C7108_AES_KEY4_LO, (u16) key[7]); + c7108_reg_wr16(C7108_AES_KEY4_HI, (u16) (key[7] >> 16)); + c7108_reg_wr16(C7108_AES_KEY5_LO, (u16) key[7]); + c7108_reg_wr16(C7108_AES_KEY5_HI, (u16) (key[7] >> 16)); + + } else if (256 == klen) { + /* 256 */ + c7108_reg_wr16(C7108_AES_KEY4_LO, (u16) key[0]); + c7108_reg_wr16(C7108_AES_KEY4_HI, (u16) (key[0] >> 16)); + c7108_reg_wr16(C7108_AES_KEY5_LO, (u16) key[1]); + c7108_reg_wr16(C7108_AES_KEY5_HI, (u16) (key[1] >> 16)); + + } + + } else { + /* Program Key */ + c7108_reg_wr16(C7108_AES_KEY0_LO, (u16) key[0]); + c7108_reg_wr16(C7108_AES_KEY0_HI, (u16) (key[0] >> 16)); + c7108_reg_wr16(C7108_AES_KEY1_LO, (u16) key[1]); + c7108_reg_wr16(C7108_AES_KEY1_HI, (u16) (key[1] >> 16)); + c7108_reg_wr16(C7108_AES_KEY2_LO, (u16) key[2]); + c7108_reg_wr16(C7108_AES_KEY2_HI, (u16) (key[2] >> 16)); + c7108_reg_wr16(C7108_AES_KEY3_LO, (u16) key[3]); + c7108_reg_wr16(C7108_AES_KEY3_HI, (u16) (key[3] >> 16)); + c7108_reg_wr16(C7108_AES_KEY4_LO, (u16) key[4]); + c7108_reg_wr16(C7108_AES_KEY4_HI, (u16) (key[4] >> 16)); + c7108_reg_wr16(C7108_AES_KEY5_LO, (u16) key[5]); + c7108_reg_wr16(C7108_AES_KEY5_HI, (u16) (key[5] >> 16)); + c7108_reg_wr16(C7108_AES_KEY6_LO, (u16) key[6]); + c7108_reg_wr16(C7108_AES_KEY6_HI, (u16) (key[6] >> 16)); + c7108_reg_wr16(C7108_AES_KEY7_LO, (u16) key[7]); + c7108_reg_wr16(C7108_AES_KEY7_HI, (u16) (key[7] >> 16)); + + } + + /* Set IV always */ + c7108_aes_set_hw_iv(iv); + + /* Program DMA addresses */ + if ((rv = c7108_aes_setup_dma(src, dst, len)) < 0) { + AES_UNLOCK(); + return rv; + } + + + /* Start AES cipher */ + c7108_reg_wr16(C7108_AES_CTRL, ctrl | C7108_AES_GO); + + //printk("Ctrl: 0x%x\n", ctrl | C7108_AES_GO); + do { + /* TODO: interrupt mode */ + // printk("aes_stat=0x%x\n", stat); + //udelay(100); + } while ((cnt++ < 1000000) && + !((stat=c7108_reg_rd16(C7108_AES_CTRL))&C7108_AES_OP_DONE)); + + + if ((mode == C7108_AES_CTRL_MODE_ECB)|| + (mode == C7108_AES_CTRL_MODE_CBC)) { + /* Save out key when the lock is held ... */ + c7108_aes_read_dkey(key); + } + + AES_UNLOCK(); + return 0; + +} + +/* + * Generate a new crypto device session. 
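+ *
+ * Walks the cryptoini chain, allocating one cipher_7108 context per
+ * descriptor in the c7108_sessions[] table; the returned *sid is the
+ * index of the session slot (slot 0 is intentionally left unused).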
+ */
+static int
+c7108_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
+{
+	struct cipher_7108 **swd;
+	u_int32_t i;
+	char *algo;
+	int mode, xfm_type;
+
+	dprintk("%s()\n", __FUNCTION__);
+	if (sid == NULL || cri == NULL) {
+		dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
+		return EINVAL;
+	}
+
+	if (c7108_sessions) {
+		for (i = 1; i < c7108_sesnum; i++)
+			if (c7108_sessions[i] == NULL)
+				break;
+	} else
+		i = 1;		/* NB: to silence compiler warning */
+
+	if (c7108_sessions == NULL || i == c7108_sesnum) {
+		if (c7108_sessions == NULL) {
+			i = 1; /* We leave c7108_sessions[0] empty */
+			c7108_sesnum = CRYPTO_SW_SESSIONS;
+		} else
+			c7108_sesnum *= 2;
+
+		swd = kmalloc(c7108_sesnum * sizeof(struct cipher_7108 *),
+			      GFP_ATOMIC);
+		if (swd == NULL) {
+			/* Reset session number */
+			if (c7108_sesnum == CRYPTO_SW_SESSIONS)
+				c7108_sesnum = 0;
+			else
+				c7108_sesnum /= 2;
+			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+			return ENOBUFS;
+		}
+		memset(swd, 0, c7108_sesnum * sizeof(struct cipher_7108 *));
+
+		/* Copy existing sessions */
+		if (c7108_sessions) {
+			memcpy(swd, c7108_sessions,
+			       (c7108_sesnum / 2) * sizeof(struct cipher_7108 *));
+			kfree(c7108_sessions);
+		}
+
+		c7108_sessions = swd;
+	}
+
+	swd = &c7108_sessions[i];
+	*sid = i;
+
+	while (cri) {
+		*swd = (struct cipher_7108 *)
+			kmalloc(sizeof(struct cipher_7108), GFP_ATOMIC);
+		if (*swd == NULL) {
+			c7108_freesession(NULL, i);
+			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+			return ENOBUFS;
+		}
+		memset(*swd, 0, sizeof(struct cipher_7108));
+
+		algo = NULL;
+		mode = 0;
+		xfm_type = HW_TYPE_CIPHER;
+
+		switch (cri->cri_alg) {
+
+		case CRYPTO_AES_CBC:
+			algo = "aes";
+			mode = CRYPTO_TFM_MODE_CBC;
+			c7108_crypto_mode = C7108_AES_CTRL_MODE_CBC;
+			break;
+#if 0
+		case CRYPTO_AES_CTR:
+			algo = "aes_ctr";
+			mode = CRYPTO_TFM_MODE_CBC;
+			c7108_crypto_mode = C7108_AES_CTRL_MODE_CTR;
+			break;
+		case CRYPTO_AES_ECB:
+			algo = "aes_ecb";
+			mode = CRYPTO_TFM_MODE_CBC;
+			c7108_crypto_mode = C7108_AES_CTRL_MODE_ECB;
+			break;
+		case CRYPTO_AES_OFB:
+			algo = "aes_ofb";
+			mode = CRYPTO_TFM_MODE_CBC;
+			c7108_crypto_mode = C7108_AES_CTRL_MODE_OFB;
+			break;
+		case CRYPTO_AES_CFB:
+			algo = "aes_cfb";
+			mode = CRYPTO_TFM_MODE_CBC;
+			c7108_crypto_mode = C7108_AES_CTRL_MODE_CFB;
+			break;
+#endif
+		default:
+			printk("unsupported crypto algorithm: %d\n",
+			       cri->cri_alg);
+			c7108_freesession(NULL, i);
+			return EINVAL;
+		}
+
+		if (!algo || !*algo) {
+			printk("cypher_7108_crypto: Unknown algo 0x%x\n",
+			       cri->cri_alg);
+			c7108_freesession(NULL, i);
+			return EINVAL;
+		}
+
+		if (xfm_type == HW_TYPE_CIPHER) {
+			if (debug) {
+				u_int32_t k;	/* don't clobber the session index i */
+				dprintk("%s key:", __FUNCTION__);
+				for (k = 0; k < (cri->cri_klen + 7) / 8; k++)
+					dprintk("%s0x%02x", (k % 8) ? " " : "\n    ",
+						cri->cri_key[k]);
+				dprintk("\n");
+			}
+
+		} else if (xfm_type == SW_TYPE_HMAC ||
+			   xfm_type == SW_TYPE_HASH) {
+			printk("cypher_7108_crypto: HMAC unsupported!\n");
+			c7108_freesession(NULL, i);
+			return EINVAL;
+		} else {
+			printk("cypher_7108_crypto: "
+			       "Unhandled xfm_type %d\n", xfm_type);
+			c7108_freesession(NULL, i);
+			return EINVAL;
+		}
+
+		(*swd)->cri_alg = cri->cri_alg;
+		(*swd)->xfm_type = xfm_type;
+
+		cri = cri->cri_next;
+		swd = &((*swd)->next);
+	}
+	return 0;
+}
+
+/*
+ * Free a session.
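+ *
+ * A sketch of the expected caller sequence (illustrative only, with
+ * hypothetical variables):
+ *
+ *	u_int32_t sid;
+ *	c7108_newsession(NULL, &sid, cri);	/* allocate a slot */
+ *	/* ... submit requests whose crp->crp_sid carries sid ... */
+ *	c7108_freesession(NULL, sid);		/* release the slot */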
+ */
+static int
+c7108_freesession(void *arg, u_int64_t tid)
+{
+	struct cipher_7108 *swd;
+	u_int32_t sid = CRYPTO_SESID2LID(tid);
+
+	dprintk("%s()\n", __FUNCTION__);
+
+	/* Silently accept and return (slot 0 is never allocated) */
+	if (sid == 0)
+		return(0);
+
+	if (sid >= c7108_sesnum || c7108_sessions == NULL ||
+	    c7108_sessions[sid] == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		return(EINVAL);
+	}
+
+	while ((swd = c7108_sessions[sid]) != NULL) {
+		c7108_sessions[sid] = swd->next;
+		kfree(swd);
+	}
+	return 0;
+}
+
+/*
+ * Process a hardware request.
+ */
+static int
+c7108_process(void *arg, struct cryptop *crp, int hint)
+{
+	struct cryptodesc *crd;
+	struct cipher_7108 *sw;
+	u_int32_t lid;
+	int type;
+	u32 hwkey[8];
+
+#define SCATTERLIST_MAX 16
+	struct scatterlist sg[SCATTERLIST_MAX];
+	int sg_num, sg_len, skip;
+	struct sk_buff *skb = NULL;
+	struct uio *uiop = NULL;
+
+	dprintk("%s()\n", __FUNCTION__);
+	/* Sanity check */
+	if (crp == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		return EINVAL;
+	}
+
+	crp->crp_etype = 0;
+
+	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		crp->crp_etype = EINVAL;
+		goto done;
+	}
+
+	lid = crp->crp_sid & 0xffffffff;
+	if (lid >= c7108_sesnum || lid == 0 || c7108_sessions == NULL ||
+	    c7108_sessions[lid] == NULL) {
+		crp->crp_etype = ENOENT;
+		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
+		goto done;
+	}
+
+	/*
+	 * do some error checking outside of the loop for SKB and IOV
+	 * processing; this leaves us with valid skb or uiop pointers
+	 * for later
+	 */
+	if (crp->crp_flags & CRYPTO_F_SKBUF) {
+		skb = (struct sk_buff *) crp->crp_buf;
+		if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
+			printk("%s,%d: %d nr_frags > SCATTERLIST_MAX\n",
+			       __FILE__, __LINE__,
+			       skb_shinfo(skb)->nr_frags);
+			crp->crp_etype = EINVAL;
+			goto done;
+		}
+	} else if (crp->crp_flags & CRYPTO_F_IOV) {
+		uiop = (struct uio *) crp->crp_buf;
+		if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
+			printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX\n",
+			       __FILE__, __LINE__,
+			       uiop->uio_iovcnt);
+			crp->crp_etype = EINVAL;
+			goto done;
+		}
+	}
+
+	/* Go through crypto descriptors, processing as we go */
+	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
+		/*
+		 * Find the crypto context.
+		 *
+		 * XXX Note that the logic here prevents us from having
+		 * XXX the same algorithm multiple times in a session
+		 * XXX (or rather, we can but it won't give us the right
+		 * XXX results). To do that, we'd need some way of differentiating
+		 * XXX between the various instances of an algorithm (so we can
+		 * XXX locate the correct crypto context).
+		 */
+		for (sw = c7108_sessions[lid];
+		     sw && sw->cri_alg != crd->crd_alg;
+		     sw = sw->next)
+			;
+
+		/* No such context ?
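+		 * (the session chain holds no entry for this
+		 * descriptor's algorithm)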
*/ + if (sw == NULL) { + crp->crp_etype = EINVAL; + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__); + goto done; + } + + skip = crd->crd_skip; + + /* + * setup the SG list skip from the start of the buffer + */ + memset(sg, 0, sizeof(sg)); + if (crp->crp_flags & CRYPTO_F_SKBUF) { + int i, len; + type = CRYPTO_BUF_SKBUF; + + sg_num = 0; + sg_len = 0; + + if (skip < skb_headlen(skb)) { + //sg[sg_num].page = virt_to_page(skb->data + skip); + //sg[sg_num].offset = offset_in_page(skb->data + skip); + len = skb_headlen(skb) - skip; + if (len + sg_len > crd->crd_len) + len = crd->crd_len - sg_len; + //sg[sg_num].length = len; + sg_set_page(&sg[sg_num], virt_to_page(skb->data + skip), len, offset_in_page(skb->data + skip)); + sg_len += sg[sg_num].length; + sg_num++; + skip = 0; + } else + skip -= skb_headlen(skb); + + for (i = 0; sg_len < crd->crd_len && + i < skb_shinfo(skb)->nr_frags && + sg_num < SCATTERLIST_MAX; i++) { + if (skip < skb_shinfo(skb)->frags[i].size) { + //sg[sg_num].page = skb_shinfo(skb)->frags[i].page; + //sg[sg_num].offset = skb_shinfo(skb)->frags[i].page_offset + skip; + len = skb_shinfo(skb)->frags[i].size - skip; + if (len + sg_len > crd->crd_len) + len = crd->crd_len - sg_len; + //sg[sg_num].length = len; + sg_set_page(&sg[sg_num], skb_shinfo(skb)->frags[i].page, len, skb_shinfo(skb)->frags[i].page_offset + skip); + sg_len += sg[sg_num].length; + sg_num++; + skip = 0; + } else + skip -= skb_shinfo(skb)->frags[i].size; + } + } else if (crp->crp_flags & CRYPTO_F_IOV) { + int len; + type = CRYPTO_BUF_IOV; + sg_len = 0; + for (sg_num = 0; sg_len < crd->crd_len && + sg_num < uiop->uio_iovcnt && + sg_num < SCATTERLIST_MAX; sg_num++) { + if (skip < uiop->uio_iov[sg_num].iov_len) { + //sg[sg_num].page = virt_to_page(uiop->uio_iov[sg_num].iov_base+skip); + //sg[sg_num].offset = offset_in_page(uiop->uio_iov[sg_num].iov_base+skip); + len = uiop->uio_iov[sg_num].iov_len - skip; + if (len + sg_len > crd->crd_len) + len = crd->crd_len - sg_len; + //sg[sg_num].length = len; + sg_set_page(&sg[sg_num], virt_to_page(uiop->uio_iov[sg_num].iov_base+skip), len, offset_in_page(uiop->uio_iov[sg_num].iov_base+skip)); + sg_len += sg[sg_num].length; + skip = 0; + } else + skip -= uiop->uio_iov[sg_num].iov_len; + } + } else { + type = CRYPTO_BUF_CONTIG; + //sg[0].page = virt_to_page(crp->crp_buf + skip); + //sg[0].offset = offset_in_page(crp->crp_buf + skip); + sg_len = (crp->crp_ilen - skip); + if (sg_len > crd->crd_len) + sg_len = crd->crd_len; + //sg[0].length = sg_len; + sg_set_page(&sg[0], virt_to_page(crp->crp_buf + skip), sg_len, offset_in_page(crp->crp_buf + skip)); + sg_num = 1; + } + + + switch (sw->xfm_type) { + + case HW_TYPE_CIPHER: { + + unsigned char iv[64]; + unsigned char *ivp = iv; + int i; + int ivsize = 16; /* fixed for AES */ + int blocksize = 16; /* fixed for AES */ + + if (sg_len < blocksize) { + crp->crp_etype = EINVAL; + dprintk("%s,%d: EINVAL len %d < %d\n", + __FILE__, __LINE__, + sg_len, + blocksize); + goto done; + } + + if (ivsize > sizeof(iv)) { + crp->crp_etype = EINVAL; + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__); + goto done; + } + + if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */ + + if (crd->crd_flags & CRD_F_IV_EXPLICIT) { + ivp = crd->crd_iv; + } else { + get_random_bytes(ivp, ivsize); + } + /* + * do we have to copy the IV back to the buffer ? 
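+			 * (only when CRD_F_IV_PRESENT is clear; the flag
+			 * means the caller already placed the IV there)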
+			 */
+			if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+				crypto_copyback(crp->crp_flags,
+						crp->crp_buf,
+						crd->crd_inject,
+						ivsize,
+						(caddr_t)ivp);
+			}
+
+			c7108_xlate_key(crd->crd_klen,
+					(u8*)crd->crd_key, (u32*)hwkey);
+
+			/* Encrypt SG list, one segment at a time */
+			for (i = 0; i < sg_num; i++) {
+				sg[i].dma_address =
+					dma_map_single(NULL,
+						       kmap(sg_page(&sg[i])) + sg[i].offset,
+						       sg[i].length, DMA_BIDIRECTIONAL);
+#if 0
+				printk("sg[%d]:0x%08x, off 0x%08x "
+				       "kmap 0x%08x phys 0x%08x\n",
+				       i, sg[i].page, sg[i].offset,
+				       kmap(sg[i].page) + sg[i].offset,
+				       sg[i].dma_address);
+#endif
+				c7108_aes_cipher(C7108_AES_ENCRYPT,
+						 sg[i].dma_address,
+						 sg[i].dma_address,
+						 sg[i].length,
+						 crd->crd_klen,
+						 c7108_crypto_mode,
+						 hwkey,
+						 ivp);
+
+				if ((c7108_crypto_mode == C7108_AES_CTRL_MODE_CBC)||
+				    (c7108_crypto_mode == C7108_AES_CTRL_MODE_ECB)) {
+					/* Read back expanded key and cache it in key
+					 * context.
+					 * NOTE: for ECB/CBC modes only (not CTR, CFB, OFB)
+					 * where you set the key once.
+					 */
+					c7108_cache_key(crd->crd_klen,
+							(u32*)hwkey, (u8*)crd->crd_key);
+#if 0
+					printk("%s expanded key:", __FUNCTION__);
+					for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
+						printk("%s0x%02x", (i % 8) ? " " : "\n    ",
+						       crd->crd_key[i]);
+					printk("\n");
+#endif
+				}
+			}
+		}
+		else { /* decrypt */
+
+			if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
+				ivp = crd->crd_iv;
+			} else {
+				crypto_copydata(crp->crp_flags,
+						crp->crp_buf, crd->crd_inject,
+						ivsize, (caddr_t)ivp);
+			}
+
+			c7108_xlate_key(crd->crd_klen,
+					(u8*)crd->crd_key, (u32*)hwkey);
+
+			/* Decrypt SG list, one segment at a time */
+			for (i = 0; i < sg_num; i++) {
+				sg[i].dma_address =
+					dma_map_single(NULL,
+						       kmap(sg_page(&sg[i])) + sg[i].offset,
+						       sg[i].length, DMA_BIDIRECTIONAL);
+#if 0
+				printk("sg[%d]:0x%08x, off 0x%08x "
+				       "kmap 0x%08x phys 0x%08x\n",
+				       i, sg[i].page, sg[i].offset,
+				       kmap(sg[i].page) + sg[i].offset,
+				       sg[i].dma_address);
+#endif
+				c7108_aes_cipher(C7108_AES_DECRYPT,
+						 sg[i].dma_address,
+						 sg[i].dma_address,
+						 sg[i].length,
+						 crd->crd_klen,
+						 c7108_crypto_mode,
+						 hwkey,
+						 ivp);
+			}
+		}
+		} break;
+		case SW_TYPE_HMAC:
+		case SW_TYPE_HASH:
+			crp->crp_etype = EINVAL;
+			goto done;
+
+		case SW_TYPE_COMP:
+			crp->crp_etype = EINVAL;
+			goto done;
+
+		default:
+			/* Unknown/unsupported algorithm */
+			dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+			crp->crp_etype = EINVAL;
+			goto done;
+		}
+	}
+
+done:
+	crypto_done(crp);
+	return 0;
+}
+
+static struct {
+	softc_device_decl sc_dev;
+} a7108dev;
+
+static device_method_t a7108_methods = {
+	/* crypto device methods */
+	DEVMETHOD(cryptodev_newsession, c7108_newsession),
+	DEVMETHOD(cryptodev_freesession, c7108_freesession),
+	DEVMETHOD(cryptodev_process, c7108_process),
+	DEVMETHOD(cryptodev_kprocess, NULL)
+};
+
+static int
+cypher_7108_crypto_init(void)
+{
+	dprintk("%s(%p)\n", __FUNCTION__, cypher_7108_crypto_init);
+
+	iobar = (unsigned long)ioremap(CCU_AES_REG_BASE, 0x4000);
+	/* C7108_AES_CTRL_MODE_CBC is 0 and the mode codes share bits,
+	 * so compare for equality rather than bit-and. */
+	printk("7108: AES @ 0x%08lx (0x%08x phys) %s mode\n",
+	       iobar, CCU_AES_REG_BASE,
+	       c7108_crypto_mode == C7108_AES_CTRL_MODE_CBC ? "CBC" :
+	       c7108_crypto_mode == C7108_AES_CTRL_MODE_ECB ? "ECB" :
+	       c7108_crypto_mode == C7108_AES_CTRL_MODE_CTR ? "CTR" :
+	       c7108_crypto_mode == C7108_AES_CTRL_MODE_CFB ? "CFB" :
+	       c7108_crypto_mode == C7108_AES_CTRL_MODE_OFB ?
"OFB" : "???"); + csr_mutex = SPIN_LOCK_UNLOCKED; + + memset(&a7108dev, 0, sizeof(a7108dev)); + softc_device_init(&a7108dev, "aes7108", 0, a7108_methods); + + c7108_id = crypto_get_driverid(softc_get_device(&a7108dev), CRYPTOCAP_F_HARDWARE); + if (c7108_id < 0) + panic("7108: crypto device cannot initialize!"); + +// crypto_register(c7108_id, CRYPTO_AES_CBC, 0, 0, c7108_newsession, c7108_freesession, c7108_process, NULL); + crypto_register(c7108_id, CRYPTO_AES_CBC, 0, 0); + + return(0); +} + +static void +cypher_7108_crypto_exit(void) +{ + dprintk("%s()\n", __FUNCTION__); + crypto_unregister_all(c7108_id); + c7108_id = -1; +} + +module_init(cypher_7108_crypto_init); +module_exit(cypher_7108_crypto_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Cypher 7108 Crypto (OCF module for kernel crypto)"); diff -Nur linux-2.6.36.orig/crypto/ocf/c7108/aes-7108.h linux-2.6.36/crypto/ocf/c7108/aes-7108.h --- linux-2.6.36.orig/crypto/ocf/c7108/aes-7108.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-2.6.36/crypto/ocf/c7108/aes-7108.h 2010-11-09 20:28:04.102495305 +0100 @@ -0,0 +1,134 @@ +/* + * Copyright (C) 2006 Micronas USA + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Effort sponsored in part by the Defense Advanced Research Projects + * Agency (DARPA) and Air Force Research Laboratory, Air Force + * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
+ * + */ + +#ifndef __AES_7108_H__ +#define __AES_7108_H__ + +/* Cypher 7108 AES Controller Hardware */ +#define CCU_REG_BASE 0x1b500000 +#define CCU_AES_REG_BASE (CCU_REG_BASE + 0x100) +#define C7108_AES_KEY0_LO (0x0000) +#define C7108_AES_KEY0_HI (0x0004) +#define C7108_AES_KEY1_LO (0x0008) +#define C7108_AES_KEY1_HI (0x000c) +#define C7108_AES_KEY2_LO (0x0010) +#define C7108_AES_KEY2_HI (0x0014) +#define C7108_AES_KEY3_LO (0x0018) +#define C7108_AES_KEY3_HI (0x001c) +#define C7108_AES_KEY4_LO (0x0020) +#define C7108_AES_KEY4_HI (0x0024) +#define C7108_AES_KEY5_LO (0x0028) +#define C7108_AES_KEY5_HI (0x002c) +#define C7108_AES_KEY6_LO (0x0030) +#define C7108_AES_KEY6_HI (0x0034) +#define C7108_AES_KEY7_LO (0x0038) +#define C7108_AES_KEY7_HI (0x003c) +#define C7108_AES_IV0_LO (0x0040) +#define C7108_AES_IV0_HI (0x0044) +#define C7108_AES_IV1_LO (0x0048) +#define C7108_AES_IV1_HI (0x004c) +#define C7108_AES_IV2_LO (0x0050) +#define C7108_AES_IV2_HI (0x0054) +#define C7108_AES_IV3_LO (0x0058) +#define C7108_AES_IV3_HI (0x005c) + +#define C7108_AES_DMA_SRC0_LO (0x0068) /* Bits 0:15 */ +#define C7108_AES_DMA_SRC0_HI (0x006c) /* Bits 27:16 */ +#define C7108_AES_DMA_DST0_LO (0x0070) /* Bits 0:15 */ +#define C7108_AES_DMA_DST0_HI (0x0074) /* Bits 27:16 */ +#define C7108_AES_DMA_LEN (0x0078) /*Bytes:(Count+1)x16 */ + +/* AES/Copy engine control register */ +#define C7108_AES_CTRL (0x007c) /* AES control */ +#define C7108_AES_CTRL_RS (1<<0) /* Which set of src/dst to use */ + +/* AES Cipher mode, controlled by setting Bits 2:0 */ +#define C7108_AES_CTRL_MODE_CBC 0 +#define C7108_AES_CTRL_MODE_CFB (1<<0) +#define C7108_AES_CTRL_MODE_OFB (1<<1) +#define C7108_AES_CTRL_MODE_CTR ((1<<0)|(1<<1)) +#define C7108_AES_CTRL_MODE_ECB (1<<2) + +/* AES Key length , Bits 5:4 */ +#define C7108_AES_KEY_LEN_128 0 /* 00 */ +#define C7108_AES_KEY_LEN_192 (1<<4) /* 01 */ +#define C7108_AES_KEY_LEN_256 (1<<5) /* 10 */ + +/* AES Operation (crypt/decrypt), Bit 3 */ +#define C7108_AES_DECRYPT (1<<3) /* Clear for encrypt */ +#define C7108_AES_ENCRYPT 0 +#define C7108_AES_INTR (1<<13) /* Set on done trans from 0->1*/ +#define C7108_AES_GO (1<<14) /* Run */ +#define C7108_AES_OP_DONE (1<<15) /* Set when complete */ + + +/* Expanded key registers */ +#define C7108_AES_EKEY0_LO (0x0080) +#define C7108_AES_EKEY0_HI (0x0084) +#define C7108_AES_EKEY1_LO (0x0088) +#define C7108_AES_EKEY1_HI (0x008c) +#define C7108_AES_EKEY2_LO (0x0090) +#define C7108_AES_EKEY2_HI (0x0094) +#define C7108_AES_EKEY3_LO (0x0098) +#define C7108_AES_EKEY3_HI (0x009c) +#define C7108_AES_EKEY4_LO (0x00a0) +#define C7108_AES_EKEY4_HI (0x00a4) +#define C7108_AES_EKEY5_LO (0x00a8) +#define C7108_AES_EKEY5_HI (0x00ac) +#define C7108_AES_EKEY6_LO (0x00b0) +#define C7108_AES_EKEY6_HI (0x00b4) +#define C7108_AES_EKEY7_LO (0x00b8) +#define C7108_AES_EKEY7_HI (0x00bc) +#define C7108_AES_OK (0x00fc) /* Reset: "OK" */ + +#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) + +/* Software session entry */ + +#define HW_TYPE_CIPHER 0 +#define SW_TYPE_HMAC 1 +#define SW_TYPE_AUTH2 2 +#define SW_TYPE_HASH 3 +#define SW_TYPE_COMP 4 + +struct cipher_7108 { + int xfm_type; + int cri_alg; + union { + struct { + char sw_key[HMAC_BLOCK_LEN]; + int sw_klen; + int sw_authlen; + } hmac; + } u; + struct cipher_7108 *next; +}; + + + +#endif /* __C7108_AES_7108_H__ */ diff -Nur linux-2.6.36.orig/crypto/ocf/c7108/Makefile linux-2.6.36/crypto/ocf/c7108/Makefile --- linux-2.6.36.orig/crypto/ocf/c7108/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ 
linux-2.6.36/crypto/ocf/c7108/Makefile	2010-11-09 20:28:04.152495478 +0100
@@ -0,0 +1,12 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_C7108) += aes-7108.o
+
+obj ?= .
+EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
+
diff -Nur linux-2.6.36.orig/crypto/ocf/Config.in linux-2.6.36/crypto/ocf/Config.in
--- linux-2.6.36.orig/crypto/ocf/Config.in	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.36/crypto/ocf/Config.in	2010-11-09 20:28:04.191247583 +0100
@@ -0,0 +1,36 @@
+#############################################################################
+
+mainmenu_option next_comment
+comment 'OCF Configuration'
+tristate 'OCF (Open Cryptographic Framework)' CONFIG_OCF_OCF
+dep_mbool    '  enable fips RNG checks (fips check on RNG data before use)' \
+				CONFIG_OCF_FIPS $CONFIG_OCF_OCF
+dep_mbool    '  enable harvesting entropy for /dev/random' \
+				CONFIG_OCF_RANDOMHARVEST $CONFIG_OCF_OCF
+dep_tristate '  cryptodev (user space support)' \
+				CONFIG_OCF_CRYPTODEV $CONFIG_OCF_OCF
+dep_tristate '  cryptosoft (software crypto engine)' \
+				CONFIG_OCF_CRYPTOSOFT $CONFIG_OCF_OCF
+dep_tristate '  safenet (HW crypto engine)' \
+				CONFIG_OCF_SAFE $CONFIG_OCF_OCF
+dep_tristate '  IXP4xx (HW crypto engine)' \
+				CONFIG_OCF_IXP4XX $CONFIG_OCF_OCF
+dep_mbool    '  Enable IXP4xx HW to perform SHA1 and MD5 hashing (very slow)' \
+				CONFIG_OCF_IXP4XX_SHA1_MD5 $CONFIG_OCF_IXP4XX
+dep_tristate '  hifn (HW crypto engine)' \
+				CONFIG_OCF_HIFN $CONFIG_OCF_OCF
+dep_tristate '  talitos (HW crypto engine)' \
+				CONFIG_OCF_TALITOS $CONFIG_OCF_OCF
+dep_tristate '  pasemi (HW crypto engine)' \
+				CONFIG_OCF_PASEMI $CONFIG_OCF_OCF
+dep_tristate '  ep80579 (HW crypto engine)' \
+				CONFIG_OCF_EP80579 $CONFIG_OCF_OCF
+dep_tristate '  Micronas c7108 (HW crypto engine)' \
+				CONFIG_OCF_C7108 $CONFIG_OCF_OCF
+dep_tristate '  ocfnull (does no crypto)' \
+				CONFIG_OCF_OCFNULL $CONFIG_OCF_OCF
+dep_tristate '  ocf-bench (HW crypto in-kernel benchmark)' \
+				CONFIG_OCF_BENCH $CONFIG_OCF_OCF
+endmenu
+
+#############################################################################
diff -Nur linux-2.6.36.orig/crypto/ocf/criov.c linux-2.6.36/crypto/ocf/criov.c
--- linux-2.6.36.orig/crypto/ocf/criov.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.36/crypto/ocf/criov.c	2010-11-09 20:28:04.232050075 +0100
@@ -0,0 +1,215 @@
+/*      $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $	*/
+
+/*
+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2010 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ * The license and original author are listed below.
+ *
+ * Copyright (c) 1999 Theo de Raadt
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * +__FBSDID("$FreeBSD: src/sys/opencrypto/criov.c,v 1.5 2006/06/04 22:15:13 pjd Exp $"); + */ + +#ifndef AUTOCONF_INCLUDED +#include <linux/config.h> +#endif +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/uio.h> +#include <linux/skbuff.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <asm/io.h> + +#include <uio.h> +#include <cryptodev.h> + +/* + * This macro is only for avoiding code duplication, as we need to skip + * given number of bytes in the same way in three functions below. + */ +#define CUIO_SKIP() do { \ + KASSERT(off >= 0, ("%s: off %d < 0", __func__, off)); \ + KASSERT(len >= 0, ("%s: len %d < 0", __func__, len)); \ + while (off > 0) { \ + KASSERT(iol >= 0, ("%s: empty in skip", __func__)); \ + if (off < iov->iov_len) \ + break; \ + off -= iov->iov_len; \ + iol--; \ + iov++; \ + } \ +} while (0) + +void +cuio_copydata(struct uio* uio, int off, int len, caddr_t cp) +{ + struct iovec *iov = uio->uio_iov; + int iol = uio->uio_iovcnt; + unsigned count; + + CUIO_SKIP(); + while (len > 0) { + KASSERT(iol >= 0, ("%s: empty", __func__)); + count = min((int)(iov->iov_len - off), len); + memcpy(cp, ((caddr_t)iov->iov_base) + off, count); + len -= count; + cp += count; + off = 0; + iol--; + iov++; + } +} + +void +cuio_copyback(struct uio* uio, int off, int len, caddr_t cp) +{ + struct iovec *iov = uio->uio_iov; + int iol = uio->uio_iovcnt; + unsigned count; + + CUIO_SKIP(); + while (len > 0) { + KASSERT(iol >= 0, ("%s: empty", __func__)); + count = min((int)(iov->iov_len - off), len); + memcpy(((caddr_t)iov->iov_base) + off, cp, count); + len -= count; + cp += count; + off = 0; + iol--; + iov++; + } +} + +/* + * Return a pointer to iov/offset of location in iovec list. 
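+ * Returns NULL when loc lies beyond the end of the list. Example
+ * (illustrative): with two iovecs of length 10 and 20, loc = 15
+ * yields the second iovec and *off = 5.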
+ */
+struct iovec *
+cuio_getptr(struct uio *uio, int loc, int *off)
+{
+	struct iovec *iov = uio->uio_iov;
+	int iol = uio->uio_iovcnt;
+
+	while (loc >= 0) {
+		/* Normal end of search */
+		if (loc < iov->iov_len) {
+			*off = loc;
+			return (iov);
+		}
+
+		loc -= iov->iov_len;
+		if (iol == 0) {
+			if (loc == 0) {
+				/* Point at the end of valid data */
+				*off = iov->iov_len;
+				return (iov);
+			} else
+				return (NULL);
+		} else {
+			iov++, iol--;
+		}
+	}
+
+	return (NULL);
+}
+
+EXPORT_SYMBOL(cuio_copyback);
+EXPORT_SYMBOL(cuio_copydata);
+EXPORT_SYMBOL(cuio_getptr);
+
+
+static void
+skb_copy_bits_back(struct sk_buff *skb, int offset, caddr_t cp, int len)
+{
+	int i, count;
+	if (offset < skb_headlen(skb)) {
+		/* copy only the bytes left in the linear area past offset */
+		count = min_t(int, skb_headlen(skb) - offset, len);
+		memcpy(skb->data + offset, cp, count);
+		len -= count;
+		cp += count;
+		offset = 0;
+	} else
+		offset -= skb_headlen(skb);
+	for (i = 0; len > 0 && i < skb_shinfo(skb)->nr_frags; i++) {
+		if (offset < skb_shinfo(skb)->frags[i].size) {
+			count = min_t(int,
+			    skb_shinfo(skb)->frags[i].size - offset, len);
+			memcpy(page_address(skb_shinfo(skb)->frags[i].page) +
+			       skb_shinfo(skb)->frags[i].page_offset + offset,
+			       cp, count);
+			len -= count;
+			cp += count;
+			offset = 0;
+		} else
+			offset -= skb_shinfo(skb)->frags[i].size;
+	}
+}
+
+void
+crypto_copyback(int flags, caddr_t buf, int off, int size, caddr_t in)
+{
+
+	if ((flags & CRYPTO_F_SKBUF) != 0)
+		skb_copy_bits_back((struct sk_buff *)buf, off, in, size);
+	else if ((flags & CRYPTO_F_IOV) != 0)
+		cuio_copyback((struct uio *)buf, off, size, in);
+	else
+		bcopy(in, buf + off, size);
+}
+
+void
+crypto_copydata(int flags, caddr_t buf, int off, int size, caddr_t out)
+{
+
+	if ((flags & CRYPTO_F_SKBUF) != 0)
+		skb_copy_bits((struct sk_buff *)buf, off, out, size);
+	else if ((flags & CRYPTO_F_IOV) != 0)
+		cuio_copydata((struct uio *)buf, off, size, out);
+	else
+		bcopy(buf + off, out, size);
+}
+
+int
+crypto_apply(int flags, caddr_t buf, int off, int len,
+    int (*f)(void *, void *, u_int), void *arg)
+{
+#if 0
+	int error;
+
+	if ((flags & CRYPTO_F_SKBUF) != 0)
+		error = XXXXXX((struct mbuf *)buf, off, len, f, arg);
+	else if ((flags & CRYPTO_F_IOV) != 0)
+		error = cuio_apply((struct uio *)buf, off, len, f, arg);
+	else
+		error = (*f)(arg, buf + off, len);
+	return (error);
+#else
+	KASSERT(0, ("crypto_apply not implemented!\n"));
+#endif
+	return 0;
+}
+
+EXPORT_SYMBOL(crypto_copyback);
+EXPORT_SYMBOL(crypto_copydata);
+EXPORT_SYMBOL(crypto_apply);
+
diff -Nur linux-2.6.36.orig/crypto/ocf/crypto.c linux-2.6.36/crypto/ocf/crypto.c
--- linux-2.6.36.orig/crypto/ocf/crypto.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.36/crypto/ocf/crypto.c	2010-11-09 20:28:04.272495385 +0100
@@ -0,0 +1,1784 @@
+/*-
+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2010 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
+ * The license and original author are listed below.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if 0
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.27 2007/03/21 03:42:51 sam Exp $");
+#endif
+
+/*
+ * Cryptographic Subsystem.
+ *
+ * This code is derived from the OpenBSD Cryptographic Framework (OCF)
+ * that has the copyright shown below.  Very little of the original
+ * code remains.
+ */
+/*-
+ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
+ *
+ * This code was written by Angelos D. Keromytis in Athens, Greece, in
+ * February 2000. Network Security Technologies Inc. (NSTI) kindly
+ * supported the development of this code.
+ *
+ * Copyright (c) 2000, 2001 Angelos D. Keromytis
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all source code copies of any software which is or includes a copy or
+ * modification of this software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ *
+__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.16 2005/01/07 02:29:16 imp Exp $");
+ */
+
+
+#ifndef AUTOCONF_INCLUDED
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/version.h>
+#include <cryptodev.h>
+
+/*
+ * Keep track of whether or not we have been initialised; a big issue
+ * if we are linked into the kernel and a driver gets started before
+ * we are ready.
+ */
+static int crypto_initted = 0;
+
+/*
+ * Crypto drivers register themselves by allocating a slot in the
+ * crypto_drivers table with crypto_get_driverid() and then registering
+ * each algorithm they support with crypto_register() and crypto_kregister().
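+ *
+ * A minimal registration sketch (this mirrors what the c7108 module
+ * earlier in this patch does):
+ *
+ *	id = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
+ *	crypto_register(id, CRYPTO_AES_CBC, 0, 0);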
+ */
+
+/*
+ * lock on driver table
+ * we track its state as spin_is_locked does not do anything on non-SMP boxes
+ */
+static spinlock_t	crypto_drivers_lock;
+static int		crypto_drivers_locked;	/* for non-SMP boxes */
+
+#define	CRYPTO_DRIVER_LOCK() \
+			({ \
+				spin_lock_irqsave(&crypto_drivers_lock, d_flags); \
+				crypto_drivers_locked = 1; \
+				dprintk("%s,%d: DRIVER_LOCK()\n", __FILE__, __LINE__); \
+			 })
+#define	CRYPTO_DRIVER_UNLOCK() \
+			({ \
+				dprintk("%s,%d: DRIVER_UNLOCK()\n", __FILE__, __LINE__); \
+				crypto_drivers_locked = 0; \
+				spin_unlock_irqrestore(&crypto_drivers_lock, d_flags); \
+			 })
+#define	CRYPTO_DRIVER_ASSERT() \
+			({ \
+				if (!crypto_drivers_locked) { \
+					dprintk("%s,%d: DRIVER_ASSERT!\n", __FILE__, __LINE__); \
+				} \
+			 })
+
+/*
+ * Crypto device/driver capabilities structure.
+ *
+ * Synchronization:
+ * (d) - protected by CRYPTO_DRIVER_LOCK()
+ * (q) - protected by CRYPTO_Q_LOCK()
+ * Not tagged fields are read-only.
+ */
+struct cryptocap {
+	device_t	cc_dev;			/* (d) device/driver */
+	u_int32_t	cc_sessions;		/* (d) # of sessions */
+	u_int32_t	cc_koperations;		/* (d) # of asym operations */
+	/*
+	 * Largest possible operator length (in bits) for each type of
+	 * encryption algorithm. XXX not used
+	 */
+	u_int16_t	cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
+	u_int8_t	cc_alg[CRYPTO_ALGORITHM_MAX + 1];
+	u_int8_t	cc_kalg[CRK_ALGORITHM_MAX + 1];
+
+	int		cc_flags;		/* (d) flags */
+#define CRYPTOCAP_F_CLEANUP	0x80000000	/* needs resource cleanup */
+	int		cc_qblocked;		/* (q) symmetric q blocked */
+	int		cc_kqblocked;		/* (q) asymmetric q blocked */
+
+	int		cc_unqblocked;		/* (q) symmetric q blocked */
+	int		cc_unkqblocked;		/* (q) asymmetric q blocked */
+};
+static struct cryptocap *crypto_drivers = NULL;
+static int crypto_drivers_num = 0;
+
+/*
+ * There are two queues for crypto requests; one for symmetric (e.g.
+ * cipher) operations and one for asymmetric (e.g. MOD) operations.
+ * A single mutex is used to lock access to both queues. We could
+ * have one per-queue but having one simplifies handling of block/unblock
+ * operations.
+ */
+static int crp_sleep = 0;
+static LIST_HEAD(crp_q);		/* request queues */
+static LIST_HEAD(crp_kq);
+
+static spinlock_t crypto_q_lock;
+
+int crypto_all_qblocked = 0;	/* protect with Q_LOCK */
+module_param(crypto_all_qblocked, int, 0444);
+MODULE_PARM_DESC(crypto_all_qblocked, "Are all crypto queues blocked");
+
+int crypto_all_kqblocked = 0;	/* protect with Q_LOCK */
+module_param(crypto_all_kqblocked, int, 0444);
+MODULE_PARM_DESC(crypto_all_kqblocked, "Are all asym crypto queues blocked");
+
+#define	CRYPTO_Q_LOCK() \
+			({ \
+				spin_lock_irqsave(&crypto_q_lock, q_flags); \
+				dprintk("%s,%d: Q_LOCK()\n", __FILE__, __LINE__); \
+			 })
+#define	CRYPTO_Q_UNLOCK() \
+			({ \
+				dprintk("%s,%d: Q_UNLOCK()\n", __FILE__, __LINE__); \
+				spin_unlock_irqrestore(&crypto_q_lock, q_flags); \
+			 })
+
+/*
+ * There are two queues for processing completed crypto requests; one
+ * for the symmetric and one for the asymmetric ops. We only need one
+ * but have two to avoid type futzing (cryptop vs. cryptkop). A single
+ * mutex is used to lock access to both queues. Note that this lock
+ * must be separate from the lock on request queues to ensure driver
+ * callbacks don't generate lock order reversals.
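+ * In other words, a driver's completion callback may itself need the
+ * request queue lock, so completions must never be queued while that
+ * lock is held.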
+ */ +static LIST_HEAD(crp_ret_q); /* callback queues */ +static LIST_HEAD(crp_ret_kq); + +static spinlock_t crypto_ret_q_lock; +#define CRYPTO_RETQ_LOCK() \ + ({ \ + spin_lock_irqsave(&crypto_ret_q_lock, r_flags); \ + dprintk("%s,%d: RETQ_LOCK\n", __FILE__, __LINE__); \ + }) +#define CRYPTO_RETQ_UNLOCK() \ + ({ \ + dprintk("%s,%d: RETQ_UNLOCK\n", __FILE__, __LINE__); \ + spin_unlock_irqrestore(&crypto_ret_q_lock, r_flags); \ + }) +#define CRYPTO_RETQ_EMPTY() (list_empty(&crp_ret_q) && list_empty(&crp_ret_kq)) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) +static kmem_cache_t *cryptop_zone; +static kmem_cache_t *cryptodesc_zone; +#else +static struct kmem_cache *cryptop_zone; +static struct kmem_cache *cryptodesc_zone; +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27) +#include <linux/sched.h> +#define kill_proc(p,s,v) send_sig(s,find_task_by_vpid(p),0) +#endif + +#define debug crypto_debug +int crypto_debug = 0; +module_param(crypto_debug, int, 0644); +MODULE_PARM_DESC(crypto_debug, "Enable debug"); +EXPORT_SYMBOL(crypto_debug); + +/* + * Maximum number of outstanding crypto requests before we start + * failing requests. We need this to prevent DOS when too many + * requests are arriving for us to keep up. Otherwise we will + * run the system out of memory. Since crypto is slow, we are + * usually the bottleneck that needs to say, enough is enough. + * + * We cannot print errors when this condition occurs, we are already too + * slow, printing anything will just kill us + */ + +static int crypto_q_cnt = 0; +module_param(crypto_q_cnt, int, 0444); +MODULE_PARM_DESC(crypto_q_cnt, + "Current number of outstanding crypto requests"); + +static int crypto_q_max = 1000; +module_param(crypto_q_max, int, 0644); +MODULE_PARM_DESC(crypto_q_max, + "Maximum number of outstanding crypto requests"); + +#define bootverbose crypto_verbose +static int crypto_verbose = 0; +module_param(crypto_verbose, int, 0644); +MODULE_PARM_DESC(crypto_verbose, + "Enable verbose crypto startup"); + +int crypto_usercrypto = 1; /* userland may do crypto reqs */ +module_param(crypto_usercrypto, int, 0644); +MODULE_PARM_DESC(crypto_usercrypto, + "Enable/disable user-mode access to crypto support"); + +int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */ +module_param(crypto_userasymcrypto, int, 0644); +MODULE_PARM_DESC(crypto_userasymcrypto, + "Enable/disable user-mode access to asymmetric crypto support"); + +int crypto_devallowsoft = 0; /* only use hardware crypto */ +module_param(crypto_devallowsoft, int, 0644); +MODULE_PARM_DESC(crypto_devallowsoft, + "Enable/disable use of software crypto support"); + +/* + * This parameter controls the maximum number of crypto operations to + * do consecutively in the crypto kernel thread before scheduling to allow + * other processes to run. Without it, it is possible to get into a + * situation where the crypto thread never allows any other processes to run. + * Default to 1000 which should be less than one second. 
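+ * (The "one second" estimate assumes each operation takes well under
+ * a millisecond; tune it via the crypto_max_loopcount module
+ * parameter defined below.)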
+ */ +static int crypto_max_loopcount = 1000; +module_param(crypto_max_loopcount, int, 0644); +MODULE_PARM_DESC(crypto_max_loopcount, + "Maximum number of crypto ops to do before yielding to other processes"); + +static pid_t cryptoproc = (pid_t) -1; +static struct completion cryptoproc_exited; +static DECLARE_WAIT_QUEUE_HEAD(cryptoproc_wait); +static pid_t cryptoretproc = (pid_t) -1; +static struct completion cryptoretproc_exited; +static DECLARE_WAIT_QUEUE_HEAD(cryptoretproc_wait); + +static int crypto_proc(void *arg); +static int crypto_ret_proc(void *arg); +static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint); +static int crypto_kinvoke(struct cryptkop *krp, int flags); +static void crypto_exit(void); +static int crypto_init(void); + +static struct cryptostats cryptostats; + +static struct cryptocap * +crypto_checkdriver(u_int32_t hid) +{ + if (crypto_drivers == NULL) + return NULL; + return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]); +} + +/* + * Compare a driver's list of supported algorithms against another + * list; return non-zero if all algorithms are supported. + */ +static int +driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri) +{ + const struct cryptoini *cr; + + /* See if all the algorithms are supported. */ + for (cr = cri; cr; cr = cr->cri_next) + if (cap->cc_alg[cr->cri_alg] == 0) + return 0; + return 1; +} + +/* + * Select a driver for a new session that supports the specified + * algorithms and, optionally, is constrained according to the flags. + * The algorithm we use here is pretty stupid; just use the + * first driver that supports all the algorithms we need. If there + * are multiple drivers we choose the driver with the fewest active + * sessions. We prefer hardware-backed drivers to software ones. + * + * XXX We need more smarts here (in real life too, but that's + * XXX another story altogether). + */ +static struct cryptocap * +crypto_select_driver(const struct cryptoini *cri, int flags) +{ + struct cryptocap *cap, *best; + int match, hid; + + CRYPTO_DRIVER_ASSERT(); + + /* + * Look first for hardware crypto devices if permitted. + */ + if (flags & CRYPTOCAP_F_HARDWARE) + match = CRYPTOCAP_F_HARDWARE; + else + match = CRYPTOCAP_F_SOFTWARE; + best = NULL; +again: + for (hid = 0; hid < crypto_drivers_num; hid++) { + cap = &crypto_drivers[hid]; + /* + * If it's not initialized, is in the process of + * going away, or is not appropriate (hardware + * or software based on match), then skip. + */ + if (cap->cc_dev == NULL || + (cap->cc_flags & CRYPTOCAP_F_CLEANUP) || + (cap->cc_flags & match) == 0) + continue; + + /* verify all the algorithms are supported. */ + if (driver_suitable(cap, cri)) { + if (best == NULL || + cap->cc_sessions < best->cc_sessions) + best = cap; + } + } + if (best != NULL) + return best; + if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) { + /* sort of an Algol 68-style for loop */ + match = CRYPTOCAP_F_SOFTWARE; + goto again; + } + return best; +} + +/* + * Create a new session. The crid argument specifies a crypto + * driver to use or constraints on a driver to select (hardware + * only, software only, either). Whatever driver is selected + * must be capable of the requested crypto algorithms. 
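+ *
+ * On success *sid packs the driver's capability flags and table index
+ * into the upper 32 bits and the driver-local session id into the
+ * lower 32; the CRYPTO_SESID2HID()/CRYPTO_SESID2LID() macros used
+ * elsewhere in this patch take the value apart again.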
+ */ +int +crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid) +{ + struct cryptocap *cap; + u_int32_t hid, lid; + int err; + unsigned long d_flags; + + CRYPTO_DRIVER_LOCK(); + if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { + /* + * Use specified driver; verify it is capable. + */ + cap = crypto_checkdriver(crid); + if (cap != NULL && !driver_suitable(cap, cri)) + cap = NULL; + } else { + /* + * No requested driver; select based on crid flags. + */ + cap = crypto_select_driver(cri, crid); + /* + * if NULL then can't do everything in one session. + * XXX Fix this. We need to inject a "virtual" session + * XXX layer right about here. + */ + } + if (cap != NULL) { + /* Call the driver initialization routine. */ + hid = cap - crypto_drivers; + lid = hid; /* Pass the driver ID. */ + cap->cc_sessions++; + CRYPTO_DRIVER_UNLOCK(); + err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri); + CRYPTO_DRIVER_LOCK(); + if (err == 0) { + (*sid) = (cap->cc_flags & 0xff000000) + | (hid & 0x00ffffff); + (*sid) <<= 32; + (*sid) |= (lid & 0xffffffff); + } else + cap->cc_sessions--; + } else + err = EINVAL; + CRYPTO_DRIVER_UNLOCK(); + return err; +} + +static void +crypto_remove(struct cryptocap *cap) +{ + CRYPTO_DRIVER_ASSERT(); + if (cap->cc_sessions == 0 && cap->cc_koperations == 0) + bzero(cap, sizeof(*cap)); +} + +/* + * Delete an existing session (or a reserved session on an unregistered + * driver). + */ +int +crypto_freesession(u_int64_t sid) +{ + struct cryptocap *cap; + u_int32_t hid; + int err = 0; + unsigned long d_flags; + + dprintk("%s()\n", __FUNCTION__); + CRYPTO_DRIVER_LOCK(); + + if (crypto_drivers == NULL) { + err = EINVAL; + goto done; + } + + /* Determine two IDs. */ + hid = CRYPTO_SESID2HID(sid); + + if (hid >= crypto_drivers_num) { + dprintk("%s - INVALID DRIVER NUM %d\n", __FUNCTION__, hid); + err = ENOENT; + goto done; + } + cap = &crypto_drivers[hid]; + + if (cap->cc_dev) { + CRYPTO_DRIVER_UNLOCK(); + /* Call the driver cleanup routine, if available, unlocked. */ + err = CRYPTODEV_FREESESSION(cap->cc_dev, sid); + CRYPTO_DRIVER_LOCK(); + } + + if (cap->cc_sessions) + cap->cc_sessions--; + + if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) + crypto_remove(cap); + +done: + CRYPTO_DRIVER_UNLOCK(); + return err; +} + +/* + * Return an unused driver id. Used by drivers prior to registering + * support for the algorithms they handle. + */ +int32_t +crypto_get_driverid(device_t dev, int flags) +{ + struct cryptocap *newdrv; + int i; + unsigned long d_flags; + + if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { + printf("%s: no flags specified when registering driver\n", + device_get_nameunit(dev)); + return -1; + } + + CRYPTO_DRIVER_LOCK(); + + for (i = 0; i < crypto_drivers_num; i++) { + if (crypto_drivers[i].cc_dev == NULL && + (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) { + break; + } + } + + /* Out of entries, allocate some more. */ + if (i == crypto_drivers_num) { + /* Be careful about wrap-around. 
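 * (doubling the table size must not overflow the signed driver count)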
*/
+		if (2 * crypto_drivers_num <= crypto_drivers_num) {
+			CRYPTO_DRIVER_UNLOCK();
+			printk("crypto: driver count wraparound!\n");
+			return -1;
+		}
+
+		newdrv = kmalloc(2 * crypto_drivers_num * sizeof(struct cryptocap),
+				GFP_KERNEL);
+		if (newdrv == NULL) {
+			CRYPTO_DRIVER_UNLOCK();
+			printk("crypto: no space to expand driver table!\n");
+			return -1;
+		}
+
+		memcpy(newdrv, crypto_drivers,
+				crypto_drivers_num * sizeof(struct cryptocap));
+		memset(&newdrv[crypto_drivers_num], 0,
+				crypto_drivers_num * sizeof(struct cryptocap));
+
+		crypto_drivers_num *= 2;
+
+		kfree(crypto_drivers);
+		crypto_drivers = newdrv;
+	}
+
+	/* NB: state is zero'd on free */
+	crypto_drivers[i].cc_sessions = 1;	/* Mark */
+	crypto_drivers[i].cc_dev = dev;
+	crypto_drivers[i].cc_flags = flags;
+	if (bootverbose)
+		printf("crypto: assign %s driver id %u, flags %u\n",
+				device_get_nameunit(dev), i, flags);
+
+	CRYPTO_DRIVER_UNLOCK();
+
+	return i;
+}
+
+/*
+ * Lookup a driver by name. We match against the full device
+ * name and unit, and against just the name. The latter gives
+ * us a simple wildcarding by device name. On success return the
+ * driver/hardware identifier; otherwise return -1.
+ */
+int
+crypto_find_driver(const char *match)
+{
+	int i, len = strlen(match);
+	unsigned long d_flags;
+
+	CRYPTO_DRIVER_LOCK();
+	for (i = 0; i < crypto_drivers_num; i++) {
+		device_t dev = crypto_drivers[i].cc_dev;
+		if (dev == NULL ||
+			(crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
+			continue;
+		if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
+			strncmp(match, device_get_name(dev), len) == 0)
+			break;
+	}
+	CRYPTO_DRIVER_UNLOCK();
+	return i < crypto_drivers_num ? i : -1;
+}
+
+/*
+ * Return the device_t for the specified driver or NULL
+ * if the driver identifier is invalid.
+ */
+device_t
+crypto_find_device_byhid(int hid)
+{
+	struct cryptocap *cap = crypto_checkdriver(hid);
+	return cap != NULL ? cap->cc_dev : NULL;
+}
+
+/*
+ * Return the device/driver capabilities.
+ */
+int
+crypto_getcaps(int hid)
+{
+	struct cryptocap *cap = crypto_checkdriver(hid);
+	return cap != NULL ? cap->cc_flags : 0;
+}
+
+/*
+ * Register support for a key-related algorithm. This routine
+ * is called once for each algorithm supported by a driver.
+ */
+int
+crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
+{
+	struct cryptocap *cap;
+	int err;
+	unsigned long d_flags;
+
+	dprintk("%s()\n", __FUNCTION__);
+	CRYPTO_DRIVER_LOCK();
+
+	cap = crypto_checkdriver(driverid);
+	if (cap != NULL &&
+	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
+		/*
+		 * XXX Do some performance testing to determine placing.
+		 * XXX We probably need an auxiliary data structure that
+		 * XXX describes relative performances.
+		 */
+
+		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
+		if (bootverbose)
+			printf("crypto: %s registers key alg %u flags %u\n"
+				, device_get_nameunit(cap->cc_dev)
+				, kalg
+				, flags
+			);
+		err = 0;
+	} else
+		err = EINVAL;
+
+	CRYPTO_DRIVER_UNLOCK();
+	return err;
+}
+
+/*
+ * Register support for a non-key-related algorithm. This routine
+ * is called once for each such algorithm supported by a driver.
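+ *
+ * For example, the c7108 driver earlier in this patch registers its
+ * one algorithm with crypto_register(c7108_id, CRYPTO_AES_CBC, 0, 0),
+ * i.e. with no maximum operand length and no algorithm flags.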
+ */ +int +crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen, + u_int32_t flags) +{ + struct cryptocap *cap; + int err; + unsigned long d_flags; + + dprintk("%s(id=0x%x, alg=%d, maxoplen=%d, flags=0x%x)\n", __FUNCTION__, + driverid, alg, maxoplen, flags); + + CRYPTO_DRIVER_LOCK(); + + cap = crypto_checkdriver(driverid); + /* NB: algorithms are in the range [1..max] */ + if (cap != NULL && + (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) { + /* + * XXX Do some performance testing to determine placing. + * XXX We probably need an auxiliary data structure that + * XXX describes relative performances. + */ + + cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED; + cap->cc_max_op_len[alg] = maxoplen; + if (bootverbose) + printf("crypto: %s registers alg %u flags %u maxoplen %u\n" + , device_get_nameunit(cap->cc_dev) + , alg + , flags + , maxoplen + ); + cap->cc_sessions = 0; /* Unmark */ + err = 0; + } else + err = EINVAL; + + CRYPTO_DRIVER_UNLOCK(); + return err; +} + +static void +driver_finis(struct cryptocap *cap) +{ + u_int32_t ses, kops; + + CRYPTO_DRIVER_ASSERT(); + + ses = cap->cc_sessions; + kops = cap->cc_koperations; + bzero(cap, sizeof(*cap)); + if (ses != 0 || kops != 0) { + /* + * If there are pending sessions, + * just mark as invalid. + */ + cap->cc_flags |= CRYPTOCAP_F_CLEANUP; + cap->cc_sessions = ses; + cap->cc_koperations = kops; + } +} + +/* + * Unregister a crypto driver. If there are pending sessions using it, + * leave enough information around so that subsequent calls using those + * sessions will correctly detect the driver has been unregistered and + * reroute requests. + */ +int +crypto_unregister(u_int32_t driverid, int alg) +{ + struct cryptocap *cap; + int i, err; + unsigned long d_flags; + + dprintk("%s()\n", __FUNCTION__); + CRYPTO_DRIVER_LOCK(); + + cap = crypto_checkdriver(driverid); + if (cap != NULL && + (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) && + cap->cc_alg[alg] != 0) { + cap->cc_alg[alg] = 0; + cap->cc_max_op_len[alg] = 0; + + /* Was this the last algorithm ? */ + for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++) + if (cap->cc_alg[i] != 0) + break; + + if (i == CRYPTO_ALGORITHM_MAX + 1) + driver_finis(cap); + err = 0; + } else + err = EINVAL; + CRYPTO_DRIVER_UNLOCK(); + return err; +} + +/* + * Unregister all algorithms associated with a crypto driver. + * If there are pending sessions using it, leave enough information + * around so that subsequent calls using those sessions will + * correctly detect the driver has been unregistered and reroute + * requests. + */ +int +crypto_unregister_all(u_int32_t driverid) +{ + struct cryptocap *cap; + int err; + unsigned long d_flags; + + dprintk("%s()\n", __FUNCTION__); + CRYPTO_DRIVER_LOCK(); + cap = crypto_checkdriver(driverid); + if (cap != NULL) { + driver_finis(cap); + err = 0; + } else + err = EINVAL; + CRYPTO_DRIVER_UNLOCK(); + + return err; +} + +/* + * Clear blockage on a driver. The what parameter indicates whether + * the driver is now ready for cryptop's and/or cryptokop's. 
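+ *
+ * A driver typically calls this, e.g. crypto_unblock(id, CRYPTO_SYMQ),
+ * once it has drained its hardware queue after blocking it by
+ * returning ERESTART from its process method.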
+ */ +int +crypto_unblock(u_int32_t driverid, int what) +{ + struct cryptocap *cap; + int err; + unsigned long q_flags; + + CRYPTO_Q_LOCK(); + cap = crypto_checkdriver(driverid); + if (cap != NULL) { + if (what & CRYPTO_SYMQ) { + cap->cc_qblocked = 0; + cap->cc_unqblocked = 0; + crypto_all_qblocked = 0; + } + if (what & CRYPTO_ASYMQ) { + cap->cc_kqblocked = 0; + cap->cc_unkqblocked = 0; + crypto_all_kqblocked = 0; + } + if (crp_sleep) + wake_up_interruptible(&cryptoproc_wait); + err = 0; + } else + err = EINVAL; + CRYPTO_Q_UNLOCK(); //DAVIDM should this be a driver lock + + return err; +} + +/* + * Add a crypto request to a queue, to be processed by the kernel thread. + */ +int +crypto_dispatch(struct cryptop *crp) +{ + struct cryptocap *cap; + int result = -1; + unsigned long q_flags; + + dprintk("%s()\n", __FUNCTION__); + + cryptostats.cs_ops++; + + CRYPTO_Q_LOCK(); + if (crypto_q_cnt >= crypto_q_max) { + CRYPTO_Q_UNLOCK(); + cryptostats.cs_drops++; + return ENOMEM; + } + crypto_q_cnt++; + + /* make sure we are starting a fresh run on this crp. */ + crp->crp_flags &= ~CRYPTO_F_DONE;