Initial Contribution

msm-2.6.38: tag AU_LINUX_ANDROID_GINGERBREAD.02.03.04.00.142

Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
diff --git a/drivers/crypto/msm/Makefile b/drivers/crypto/msm/Makefile
new file mode 100644
index 0000000..61406b9
--- /dev/null
+++ b/drivers/crypto/msm/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev.o
+ifeq ($(CONFIG_CRYPTO_DEV_QCE40), y)
+obj-$(CONFIG_CRYPTO_DEV_QCE) += qce40.o
+else
+obj-$(CONFIG_CRYPTO_DEV_QCE) += qce.o
+endif
+obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto.o
+obj-$(CONFIG_CRYPTO_DEV_OTA_CRYPTO) += ota_crypto.o
diff --git a/drivers/crypto/msm/inc/qce.h b/drivers/crypto/msm/inc/qce.h
new file mode 100644
index 0000000..7230036
--- /dev/null
+++ b/drivers/crypto/msm/inc/qce.h
@@ -0,0 +1,160 @@
+/* Qualcomm Crypto Engine driver API
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#ifndef __CRYPTO_MSM_QCE_H
+#define __CRYPTO_MSM_QCE_H
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/crypto.h>
+
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/sha.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+
+/* SHA digest size in bytes */
+#define SHA256_DIGESTSIZE		32
+#define SHA1_DIGESTSIZE			20
+
+/* key size in bytes */
+#define HMAC_KEY_SIZE			(SHA1_DIGESTSIZE)    /* hmac-sha1 */
+#define SHA_HMAC_KEY_SIZE		64
+#define DES_KEY_SIZE			8
+#define TRIPLE_DES_KEY_SIZE		24
+#define AES128_KEY_SIZE			16
+#define AES192_KEY_SIZE			24
+#define AES256_KEY_SIZE			32
+#define MAX_CIPHER_KEY_SIZE		AES256_KEY_SIZE
+
+/* iv length in bytes */
+#define AES_IV_LENGTH			16
+#define DES_IV_LENGTH			8
+#define MAX_IV_LENGTH			AES_IV_LENGTH
+
+/* Maximum number of bytes per transfer */
+#define QCE_MAX_OPER_DATA		0x8000
+
+/* Maximum nonce bytes */
+#define MAX_NONCE  16
+
+typedef void (*qce_comp_func_ptr_t)(void *areq,
+		unsigned char *icv, unsigned char *iv, int ret);
+
+/* Cipher algorithms supported */
+enum qce_cipher_alg_enum {
+	CIPHER_ALG_DES = 0,
+	CIPHER_ALG_3DES = 1,
+	CIPHER_ALG_AES = 2,
+	CIPHER_ALG_LAST
+};
+
+/* Hash and hmac algorithms supported */
+enum qce_hash_alg_enum {
+	QCE_HASH_SHA1   = 0,
+	QCE_HASH_SHA256 = 1,
+	QCE_HASH_SHA1_HMAC   = 2,
+	QCE_HASH_SHA256_HMAC = 3,
+	QCE_HASH_AES_CMAC = 4,
+	QCE_HASH_LAST
+};
+
+/* Cipher encryption/decryption operations */
+enum qce_cipher_dir_enum {
+	QCE_ENCRYPT = 0,
+	QCE_DECRYPT = 1,
+	QCE_CIPHER_DIR_LAST
+};
+
+/* Cipher algorithms modes */
+enum qce_cipher_mode_enum {
+	QCE_MODE_CBC = 0,
+	QCE_MODE_ECB = 1,
+	QCE_MODE_CTR = 2,
+	QCE_MODE_XTS = 3,
+	QCE_MODE_CCM = 4,
+	QCE_CIPHER_MODE_LAST
+};
+
+/* Cipher operation type */
+enum qce_req_op_enum {
+	QCE_REQ_ABLK_CIPHER = 0,
+	QCE_REQ_ABLK_CIPHER_NO_KEY = 1,
+	QCE_REQ_AEAD = 2,
+	QCE_REQ_LAST
+};
+
+/* Algorithms/features supported in CE HW engine */
+struct ce_hw_support {
+	bool sha1_hmac_20; /* supports 20-byte HMAC key */
+	bool sha1_hmac; /* supports max HMAC key of 64 bytes */
+	bool sha256_hmac; /* supports max HMAC key of 64 bytes */
+	bool sha_hmac; /* supports SHA1 and SHA256 max HMAC key of 64 bytes */
+	bool cmac;
+	bool aes_key_192;
+	bool aes_xts;
+	bool aes_ccm;
+	bool ota;
+};
+
+/* Sha operation parameters */
+struct qce_sha_req {
+	qce_comp_func_ptr_t qce_cb;	/* callback */
+	enum qce_hash_alg_enum alg;	/* sha algorithm */
+	unsigned char *digest;		/* sha digest */
+	struct scatterlist *src;	/* pointer to scatter list entry */
+	uint32_t  auth_data[4];		/* byte count */
+	unsigned char *authkey;		/* auth key */
+	unsigned int  authklen;		/* auth key length */
+	bool first_blk;			/* first block indicator */
+	bool last_blk;			/* last block indicator */
+	unsigned int size;		/* data length in bytes */
+	void *areq;
+};
+
+struct qce_req {
+	enum qce_req_op_enum op;	/* operation type */
+	qce_comp_func_ptr_t qce_cb;	/* callback */
+	void *areq;
+	enum qce_cipher_alg_enum   alg;	/* cipher algorithms*/
+	enum qce_cipher_dir_enum dir;	/* encryption? decryption? */
+	enum qce_cipher_mode_enum mode;	/* algorithm mode */
+	unsigned char *authkey;		/* authentication key */
+	unsigned int authklen;		/* authentication key length */
+	unsigned int authsize;		/* authentication data (MAC/ICV) size */
+	unsigned char  nonce[MAX_NONCE];/* nonce for ccm mode */
+	unsigned char *assoc;		/* Ptr to formatted associated data */
+	unsigned int assoclen;		/* Formatted associated data length  */
+	struct scatterlist *asg;	/* Formatted associated data sg  */
+	unsigned char *enckey;		/* cipher key  */
+	unsigned int encklen;		/* cipher key length */
+	unsigned char *iv;		/* initialization vector */
+	unsigned int ivsize;		/* initialization vector size */
+	unsigned int cryptlen;		/* data length */
+	unsigned int use_pmem;		/* is source of data PMEM allocated? */
+	struct qcedev_pmem_info *pmem;	/* pointer to pmem_info structure*/
+};
+
+void *qce_open(struct platform_device *pdev, int *rc);
+int qce_close(void *handle);
+int qce_aead_req(void *handle, struct qce_req *req);
+int qce_ablk_cipher_req(void *handle, struct qce_req *req);
+int qce_hw_support(void *handle, struct ce_hw_support *support);
+int qce_process_sha_req(void *handle, struct qce_sha_req *s_req);
+
+#endif /* __CRYPTO_MSM_QCE_H */
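
For reference, a minimal sketch of how a kernel client might drive this API: open a handle against the CE platform device, submit a request with a completion callback, and close the handle when done. The callback and completion wiring below are illustrative assumptions, not part of this interface; only the qce_* calls and struct fields come from the header above.

#include <linux/completion.h>
#include "inc/qce.h"

static struct completion sha_done;

/* Matches qce_comp_func_ptr_t; runs when the engine finishes. */
static void sha_cb(void *areq, unsigned char *icv, unsigned char *iv, int ret)
{
	complete(&sha_done);
}

/* Hash a single, final block of data with SHA-256 (error paths trimmed). */
static int sha256_one_shot(struct platform_device *pdev,
			   struct scatterlist *sg, unsigned int len,
			   unsigned char digest[SHA256_DIGESTSIZE])
{
	struct qce_sha_req sreq = {
		.qce_cb    = sha_cb,
		.alg       = QCE_HASH_SHA256,
		.digest    = digest,
		.src       = sg,
		.first_blk = true,
		.last_blk  = true,
		.size      = len,
	};
	int rc;
	void *handle = qce_open(pdev, &rc);

	if (handle == NULL)
		return rc;
	init_completion(&sha_done);
	rc = qce_process_sha_req(handle, &sreq);
	if (rc == 0)
		wait_for_completion(&sha_done);
	qce_close(handle);
	return rc;
}
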
diff --git a/drivers/crypto/msm/inc/qce_ota.h b/drivers/crypto/msm/inc/qce_ota.h
new file mode 100644
index 0000000..f21bd0b
--- /dev/null
+++ b/drivers/crypto/msm/inc/qce_ota.h
@@ -0,0 +1,31 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* Qualcomm Crypto Engine driver OTA API */
+
+#ifndef __CRYPTO_MSM_QCE_OTA_H
+#define __CRYPTO_MSM_QCE_OTA_H
+
+#include <linux/platform_device.h>
+#include <linux/qcota.h>
+#include "qce.h"
+
+
+int qce_f8_req(void *handle, struct qce_f8_req *req,
+		void *cookie, qce_comp_func_ptr_t qce_cb);
+int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *req,
+		void *cookie, qce_comp_func_ptr_t qce_cb);
+int qce_f9_req(void *handle, struct qce_f9_req *req,
+		void *cookie, qce_comp_func_ptr_t qce_cb);
+
+#endif /* __CRYPTO_MSM_QCE_OTA_H */
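
A hedged sketch of submitting an f9 integrity request through the API above. Only the call signatures come from this header; struct qce_f9_req lives in linux/qcota.h (not shown in this change), and the completion-based wait mirrors what ota_crypto.c below does with its own request queue.

#include <linux/completion.h>
#include "inc/qce_ota.h"

static void ota_f9_cb(void *cookie, unsigned char *icv,
		      unsigned char *iv, int ret)
{
	/* A real client would capture icv into its request here,
	 * the way ota_crypto.c stores it in f9_req.mac_i. */
	complete((struct completion *)cookie);
}

static int compute_f9_mac(void *qce_handle, struct qce_f9_req *req)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int rc = qce_f9_req(qce_handle, req, &done, ota_f9_cb);

	if (rc == 0)
		wait_for_completion(&done);
	return rc;
}
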
diff --git a/drivers/crypto/msm/inc/qcedev.h b/drivers/crypto/msm/inc/qcedev.h
new file mode 100644
index 0000000..893251f
--- /dev/null
+++ b/drivers/crypto/msm/inc/qcedev.h
@@ -0,0 +1,267 @@
+/* Qualcomm Crypto Engine driver QCEDEV API
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QCEDEV__H
+#define __QCEDEV__H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define QCEDEV_MAX_SHA_BLOCK_SIZE	64
+#define QCEDEV_MAX_BEARER	31
+#define QCEDEV_MAX_KEY_SIZE	64
+#define QCEDEV_MAX_IV_SIZE	32
+
+#define QCEDEV_MAX_BUFFERS      16
+#define QCEDEV_MAX_SHA_DIGEST	32
+
+#define QCEDEV_USE_PMEM		1
+#define QCEDEV_NO_PMEM		0
+
+#define QCEDEV_AES_KEY_128	16
+#define QCEDEV_AES_KEY_192	24
+#define QCEDEV_AES_KEY_256	32
+/**
+* qcedev_oper_enum: Operation types
+* @QCEDEV_OPER_DEC:		Decrypt
+* @QCEDEV_OPER_ENC:		Encrypt
+* @QCEDEV_OPER_DEC_NO_KEY:	Decrypt. The key does not need to be specified
+*				by the user; it has already been set by an
+*				external processor.
+* @QCEDEV_OPER_ENC_NO_KEY:	Encrypt. The key does not need to be specified
+*				by the user; it has already been set by an
+*				external processor.
+*/
+enum qcedev_oper_enum {
+  QCEDEV_OPER_DEC		= 0,
+  QCEDEV_OPER_ENC		= 1,
+  QCEDEV_OPER_DEC_NO_KEY	= 2,
+  QCEDEV_OPER_ENC_NO_KEY	= 3,
+  QCEDEV_OPER_LAST
+};
+
+/**
+* qcedev_cipher_alg_enum: Cipher algorithm types
+* @QCEDEV_ALG_DES:		DES
+* @QCEDEV_ALG_3DES:		3DES
+* @QCEDEV_ALG_AES:		AES
+*/
+enum qcedev_cipher_alg_enum {
+	QCEDEV_ALG_DES		= 0,
+	QCEDEV_ALG_3DES		= 1,
+	QCEDEV_ALG_AES		= 2,
+	QCEDEV_ALG_LAST
+};
+
+/**
+* qcedev_cipher_mode_enum: Cipher mode (AES and DES)
+* @QCEDEV_AES_MODE_CBC:		CBC
+* @QCEDEV_AES_MODE_ECB:		ECB
+* @QCEDEV_AES_MODE_CTR:		CTR
+* @QCEDEV_AES_MODE_XTS:		XTS
+* @QCEDEV_AES_MODE_CCM:		CCM
+* @QCEDEV_DES_MODE_CBC:		CBC
+* @QCEDEV_DES_MODE_ECB:		ECB
+*/
+enum qcedev_cipher_mode_enum {
+	QCEDEV_AES_MODE_CBC	= 0,
+	QCEDEV_AES_MODE_ECB	= 1,
+	QCEDEV_AES_MODE_CTR	= 2,
+	QCEDEV_AES_MODE_XTS	= 3,
+	QCEDEV_AES_MODE_CCM	= 4,
+	QCEDEV_DES_MODE_CBC	= 5,
+	QCEDEV_DES_MODE_ECB	= 6,
+	QCEDEV_AES_DES_MODE_LAST
+};
+
+/**
+* qcedev_sha_alg_enum: Secure hashing algorithm
+* @QCEDEV_ALG_SHA1:		Digest returned: 20 bytes (160 bits)
+* @QCEDEV_ALG_SHA256:		Digest returned: 32 bytes (256 bits)
+* @QCEDEV_ALG_SHA1_HMAC:	HMAC returned: 20 bytes (160 bits)
+* @QCEDEV_ALG_SHA256_HMAC:	HMAC returned: 32 bytes (256 bits)
+* @QCEDEV_ALG_AES_CMAC:	Configurable MAC size
+*/
+enum qcedev_sha_alg_enum {
+	QCEDEV_ALG_SHA1		= 0,
+	QCEDEV_ALG_SHA256	= 1,
+	QCEDEV_ALG_SHA1_HMAC	= 2,
+	QCEDEV_ALG_SHA256_HMAC	= 3,
+	QCEDEV_ALG_AES_CMAC	= 4,
+	QCEDEV_ALG_SHA_ALG_LAST
+};
+
+/**
+* struct buf_info - Buffer information
+* @offset:			Offset from the base address of the buffer
+*				(Used when buffer is allocated using PMEM)
+* @vaddr:			Virtual buffer address pointer
+* @len:				Size of the buffer
+*/
+struct	buf_info {
+	union {
+		uint32_t	offset;
+		uint8_t		*vaddr;
+	};
+	uint32_t	len;
+};
+
+/**
+* struct qcedev_vbuf_info - Source and destination Buffer information
+* @src:				Array of buf_info for input/source
+* @dst:				Array of buf_info for output/destination
+*/
+struct	qcedev_vbuf_info {
+	struct buf_info	src[QCEDEV_MAX_BUFFERS];
+	struct buf_info	dst[QCEDEV_MAX_BUFFERS];
+};
+
+struct	qcedev_sha_ctxt {
+	uint32_t		auth_data[4];
+	uint8_t			digest[QCEDEV_MAX_SHA_DIGEST];
+	uint32_t		diglen;
+	uint8_t			trailing_buf[64];
+	uint32_t		trailing_buf_len;
+	uint8_t			first_blk;
+	uint8_t			last_blk;
+	uint8_t			authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
+};
+
+/**
+* struct qcedev_pmem_info - Stores PMEM buffer information
+* @fd_src:			Handle to /dev/adsp_pmem used to allocate
+*				memory for input/src buffer
+* @src:				Array of buf_info for input/source
+* @fd_dst:			Handle to /dev/adsp_pmem used to allocate
+*				memory for output/dst buffer
+* @dst:				Array of buf_info for output/destination
+*/
+struct	qcedev_pmem_info {
+	int		fd_src;
+	struct buf_info	src[QCEDEV_MAX_BUFFERS];
+	int		fd_dst;
+	struct buf_info	dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+* struct qcedev_cipher_op_req - Holds the ciphering request information
+* @use_pmem (IN):	Flag to indicate if buffer source is PMEM
+*			QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
+* @pmem (IN):		Stores PMEM buffer information.
+*			Refer struct qcedev_pmem_info
+* @vbuf (IN/OUT):	Stores Source and destination Buffer information
+*			Refer to struct qcedev_vbuf_info
+* @data_len (IN):	Total length of input/src and output/dst in bytes
+* @in_place_op (IN):	Indicates whether the operation is in place, where
+*			source == destination
+*			When using PMEM-allocated memory, this must be set to 1
+* @enckey (IN):		128 bits of confidentiality key
+*			enckey[0] bit 127-120, enckey[1] bit 119-112,..
+*			enckey[15] bit 7-0
+* @encklen (IN):	Length of the encryption key (set to 128 bits/16
+*			bytes in the driver)
+* @iv (IN/OUT):		Initialisation vector data
+*			This is updated by the driver, incremented by
+*			number of blocks encrypted/decrypted.
+* @ivlen (IN):		Length of the IV
+* @byteoffset (IN):	Offset in the Cipher BLOCK (applicable and to be set
+*			for AES-128 CTR mode only)
+* @alg (IN):		Type of ciphering algorithm: AES/DES/3DES
+* @mode (IN):		Mode to use with the AES algorithm: ECB/CBC/CTR
+*			Applicable only when using the AES algorithm
+* @op (IN):		Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
+*			QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
+*
+* If use_pmem is set to 0, the driver assumes that memory was not allocated
+* via PMEM; the kernel will allocate memory, copy the data in from the user
+* space buffer (data_src/data_dst), process it, and copy the data back to
+* the user space buffer
+*
+* If use_pmem is set to 1, the driver assumes that memory was allocated via
+* PMEM.
+* The kernel driver will use the fd_src to determine the kernel virtual address
+* base that maps to the user space virtual address base for the buffer
+* allocated in user space.
+* The final input/src and output/dst buffer pointer will be determined
+* by adding the offsets to the kernel virtual addr.
+*
+* If use of a hardware key is supported in the target, the user can configure
+* the key parameters (encklen, enckey) to use the hardware key.
+* In order to use the hardware key, set encklen to 0 and set the enckey
+* data array to 0.
+*/
+struct	qcedev_cipher_op_req {
+	uint8_t				use_pmem;
+	union {
+		struct qcedev_pmem_info	pmem;
+		struct qcedev_vbuf_info	vbuf;
+	};
+	uint32_t			entries;
+	uint32_t			data_len;
+	uint8_t				in_place_op;
+	uint8_t				enckey[QCEDEV_MAX_KEY_SIZE];
+	uint32_t			encklen;
+	uint8_t				iv[QCEDEV_MAX_IV_SIZE];
+	uint32_t			ivlen;
+	uint32_t			byteoffset;
+	enum qcedev_cipher_alg_enum	alg;
+	enum qcedev_cipher_mode_enum	mode;
+	enum qcedev_oper_enum		op;
+};
+
+/**
+* struct qcedev_sha_op_req - Holds the hashing request information
+* @data (IN):			Array of pointers to the data to be hashed
+* @entries (IN):		Number of buf_info entries in the data array
+* @data_len (IN):		Length of data to be hashed
+* @digest (IN/OUT):		Buffer that returns the computed digest
+* @diglen (OUT):		Size of the hashed/digest data
+* @authkey (IN):		Pointer to authentication key for HMAC
+* @authklen (IN):		Size of the authentication key
+* @alg (IN):			Secure Hash algorithm
+* @ctxt (RESERVED):		Reserved; the user must not modify this data.
+*/
+struct	qcedev_sha_op_req {
+	struct buf_info			data[QCEDEV_MAX_BUFFERS];
+	uint32_t			entries;
+	uint32_t			data_len;
+	uint8_t				digest[QCEDEV_MAX_SHA_DIGEST];
+	uint32_t			diglen;
+	uint8_t				*authkey;
+	uint32_t			authklen;
+	enum qcedev_sha_alg_enum	alg;
+	struct qcedev_sha_ctxt		ctxt;
+};
+
+
+#define QCEDEV_IOC_MAGIC	0x87
+
+#define QCEDEV_IOCTL_ENC_REQ		\
+	_IOWR(QCEDEV_IOC_MAGIC, 1, struct qcedev_cipher_op_req)
+#define QCEDEV_IOCTL_DEC_REQ		\
+	_IOWR(QCEDEV_IOC_MAGIC, 2, struct qcedev_cipher_op_req)
+#define QCEDEV_IOCTL_SHA_INIT_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 3, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_SHA_UPDATE_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 4, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_SHA_FINAL_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 5, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_GET_SHA_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 6, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_LOCK_CE	\
+	_IO(QCEDEV_IOC_MAGIC, 7)
+#define QCEDEV_IOCTL_UNLOCK_CE	\
+	_IO(QCEDEV_IOC_MAGIC, 8)
+#define QCEDEV_IOCTL_GET_CMAC_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 9, struct qcedev_cipher_op_req)
+#endif /* __QCEDEV__H */
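
A hedged user-space sketch of the ioctl interface above, performing an in-place AES-128-CBC encrypt through virtual buffers. The device node name "/dev/qce" and the header include path are assumptions (the qcedev.c registration is not part of this header); the structure fields and the ioctl come from the declarations above.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "qcedev.h"			/* assumed install path of this header */

int qce_encrypt_aes128_cbc(uint8_t *buf, uint32_t len,
			   const uint8_t key[16], const uint8_t iv[16])
{
	struct qcedev_cipher_op_req req;
	int fd = open("/dev/qce", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return -1;
	memset(&req, 0, sizeof(req));
	req.use_pmem = QCEDEV_NO_PMEM;
	req.vbuf.src[0].vaddr = buf;
	req.vbuf.src[0].len = len;
	req.vbuf.dst[0].vaddr = buf;		/* in-place operation */
	req.vbuf.dst[0].len = len;
	req.entries = 1;
	req.data_len = len;
	req.in_place_op = 1;
	memcpy(req.enckey, key, QCEDEV_AES_KEY_128);
	req.encklen = QCEDEV_AES_KEY_128;
	memcpy(req.iv, iv, 16);
	req.ivlen = 16;
	req.alg = QCEDEV_ALG_AES;
	req.mode = QCEDEV_AES_MODE_CBC;
	req.op = QCEDEV_OPER_ENC;

	if (ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
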
diff --git a/drivers/crypto/msm/inc/qcryptohw_30.h b/drivers/crypto/msm/inc/qcryptohw_30.h
new file mode 100644
index 0000000..edbee71
--- /dev/null
+++ b/drivers/crypto/msm/inc/qcryptohw_30.h
@@ -0,0 +1,308 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_30_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_30_H_
+
+#define QCE_AUTH_REG_BYTE_COUNT 2
+#define CRYPTO_DATA_IN_REG			0x0
+#define CRYPTO_DATA_OUT_REG			0x10
+#define CRYPTO_STATUS_REG			0x20
+#define CRYPTO_CONFIG_REG			0x24
+#define CRYPTO_DEBUG_REG			0x28
+#define CRYPTO_REGISTER_LOCK_REG		0x2C
+#define CRYPTO_SEG_CFG_REG			0x30
+#define CRYPTO_ENCR_SEG_CFG_REG			0x34
+#define CRYPTO_AUTH_SEG_CFG_REG			0x38
+#define CRYPTO_SEG_SIZE_REG			0x3C
+#define CRYPTO_GOPROC_REG			0x40
+#define CRYPTO_ENGINES_AVAIL			0x44
+
+#define CRYPTO_DES_KEY0_REG			0x50
+#define CRYPTO_DES_KEY1_REG			0x54
+#define CRYPTO_DES_KEY2_REG			0x58
+#define CRYPTO_DES_KEY3_REG			0x5C
+#define CRYPTO_DES_KEY4_REG			0x60
+#define CRYPTO_DES_KEY5_REG			0x64
+
+#define CRYPTO_CNTR0_IV0_REG			0x70
+#define CRYPTO_CNTR1_IV1_REG			0x74
+#define CRYPTO_CNTR2_IV2_REG			0x78
+#define CRYPTO_CNTR3_IV3_REG			0x7C
+#define CRYPTO_CNTR_MASK_REG			0x80
+
+#define CRYPTO_AUTH_BYTECNT0_REG		0x90
+#define CRYPTO_AUTH_BYTECNT1_REG		0x94
+#define CRYPTO_AUTH_BYTECNT2_REG		0x98
+#define CRYPTO_AUTH_BYTECNT3_REG		0x9C
+
+#define CRYPTO_AUTH_IV0_REG			0x100
+#define CRYPTO_AUTH_IV1_REG			0x104
+#define CRYPTO_AUTH_IV2_REG			0x108
+#define CRYPTO_AUTH_IV3_REG			0x10C
+#define CRYPTO_AUTH_IV4_REG			0x110
+#define CRYPTO_AUTH_IV5_REG			0x114
+#define CRYPTO_AUTH_IV6_REG			0x118
+#define CRYPTO_AUTH_IV7_REG			0x11C
+#define CRYPTO_AUTH_IV8_REG			0x120
+#define CRYPTO_AUTH_IV9_REG			0x124
+#define CRYPTO_AUTH_IV10_REG			0x128
+#define CRYPTO_AUTH_IV11_REG			0x12C
+#define CRYPTO_AUTH_IV12_REG			0x130
+#define CRYPTO_AUTH_IV13_REG			0x134
+#define CRYPTO_AUTH_IV14_REG			0x138
+#define CRYPTO_AUTH_IV15_REG			0x13C
+
+#define CRYPTO_AES_RNDKEY0			0x200
+#define CRYPTO_AES_RNDKEY1			0x204
+#define CRYPTO_AES_RNDKEY2			0x208
+#define CRYPTO_AES_RNDKEY3			0x20C
+#define CRYPTO_AES_RNDKEY4			0x210
+#define CRYPTO_AES_RNDKEY5			0x214
+#define CRYPTO_AES_RNDKEY6			0x218
+#define CRYPTO_AES_RNDKEY7			0x21C
+#define CRYPTO_AES_RNDKEY8			0x220
+#define CRYPTO_AES_RNDKEY9			0x224
+#define CRYPTO_AES_RNDKEY10			0x228
+#define CRYPTO_AES_RNDKEY11			0x22c
+#define CRYPTO_AES_RNDKEY12			0x230
+#define CRYPTO_AES_RNDKEY13			0x234
+#define CRYPTO_AES_RNDKEY14			0x238
+#define CRYPTO_AES_RNDKEY15			0x23C
+#define CRYPTO_AES_RNDKEY16			0x240
+#define CRYPTO_AES_RNDKEY17			0x244
+#define CRYPTO_AES_RNDKEY18			0x248
+#define CRYPTO_AES_RNDKEY19			0x24C
+#define CRYPTO_AES_RNDKEY20			0x250
+#define CRYPTO_AES_RNDKEY21			0x254
+#define CRYPTO_AES_RNDKEY22			0x258
+#define CRYPTO_AES_RNDKEY23			0x25C
+#define CRYPTO_AES_RNDKEY24			0x260
+#define CRYPTO_AES_RNDKEY25			0x264
+#define CRYPTO_AES_RNDKEY26			0x268
+#define CRYPTO_AES_RNDKEY27			0x26C
+#define CRYPTO_AES_RNDKEY28			0x270
+#define CRYPTO_AES_RNDKEY29			0x274
+#define CRYPTO_AES_RNDKEY30			0x278
+#define CRYPTO_AES_RNDKEY31			0x27C
+#define CRYPTO_AES_RNDKEY32			0x280
+#define CRYPTO_AES_RNDKEY33			0x284
+#define CRYPTO_AES_RNDKEY34			0x288
+#define CRYPTO_AES_RNDKEY35			0x28c
+#define CRYPTO_AES_RNDKEY36			0x290
+#define CRYPTO_AES_RNDKEY37			0x294
+#define CRYPTO_AES_RNDKEY38			0x298
+#define CRYPTO_AES_RNDKEY39			0x29C
+#define CRYPTO_AES_RNDKEY40			0x2A0
+#define CRYPTO_AES_RNDKEY41			0x2A4
+#define CRYPTO_AES_RNDKEY42			0x2A8
+#define CRYPTO_AES_RNDKEY43			0x2AC
+#define CRYPTO_AES_RNDKEY44			0x2B0
+#define CRYPTO_AES_RNDKEY45			0x2B4
+#define CRYPTO_AES_RNDKEY46			0x2B8
+#define CRYPTO_AES_RNDKEY47			0x2BC
+#define CRYPTO_AES_RNDKEY48			0x2C0
+#define CRYPTO_AES_RNDKEY49			0x2C4
+#define CRYPTO_AES_RNDKEY50			0x2C8
+#define CRYPTO_AES_RNDKEY51			0x2CC
+#define CRYPTO_AES_RNDKEY52			0x2D0
+#define CRYPTO_AES_RNDKEY53			0x2D4
+#define CRYPTO_AES_RNDKEY54			0x2D8
+#define CRYPTO_AES_RNDKEY55			0x2DC
+#define CRYPTO_AES_RNDKEY56			0x2E0
+#define CRYPTO_AES_RNDKEY57			0x2E4
+#define CRYPTO_AES_RNDKEY58			0x2E8
+#define CRYPTO_AES_RNDKEY59			0x2EC
+
+#define CRYPTO_DATA_SHADOW0			0x8000
+#define CRYPTO_DATA_SHADOW8191			0x8FFC
+
+/* status reg  */
+#define CRYPTO_CORE_REV				28	/* bit 31-28 */
+#define CRYPTO_CORE_REV_MASK			(0xf << CRYPTO_CORE_REV)
+#define CRYPTO_DOUT_SIZE_AVAIL			22	/* bit 24-22 */
+#define CRYPTO_DOUT_SIZE_AVAIL_MASK		(0x7 << CRYPTO_DOUT_SIZE_AVAIL)
+#define CRYPTO_DIN_SIZE_AVAIL			19	/* bit 21-19 */
+#define CRYPTO_DIN_SIZE_AVAIL_MASK		(0x7 << CRYPTO_DIN_SIZE_AVAIL)
+#define CRYPTO_ACCESS_VIOL			18
+#define CRYPTO_SEG_CHNG_ERR			17
+#define CRYPTO_CFH_CHNG_ERR			16
+#define CRYPTO_DOUT_ERR				15
+#define CRYPTO_DIN_ERR				14
+#define CRYPTO_LOCKED				13
+#define CRYPTO_CRYPTO_STATE			10	/* bit 12-10 */
+#define CRYPTO_CRYPTO_STATE_MASK		(0x7 << CRYPTO_CRYPTO_STATE)
+#define CRYPTO_ENCR_BUSY			9
+#define CRYPTO_AUTH_BUSY			8
+#define CRYPTO_DOUT_INTR			7
+#define CRYPTO_DIN_INTR				6
+#define CRYPTO_AUTH_DONE_INTR			5
+#define CRYPTO_ERR_INTR				4
+#define CRYPTO_DOUT_RDY				3
+#define CRYPTO_DIN_RDY				2
+#define CRYPTO_AUTH_DONE			1
+#define CRYPTO_SW_ERR				0
+
+#define CRYPTO_CRYPTO_STATE_IDLE		0
+#define CRYPTO_CRYPTO_STATE_LOCKED		1
+#define CRYPTO_CRYPTO_STATE_GO			3
+#define CRYPTO_CRYPTO_STATE_PROCESSING		4
+#define CRYPTO_CRYPTO_STATE_FINAL_READ		5
+#define CRYPTO_CRYPTO_STATE_CTXT_CLEARING	6
+#define CRYPTO_CRYPTO_STATE_UNLOCKING		7
+
+/* config reg */
+#define CRYPTO_HIGH_SPD_HASH_EN_N		15
+#define CRYPTO_HIGH_SPD_OUT_EN_N		14
+#define CRYPTO_HIGH_SPD_IN_EN_N			13
+#define CRYPTO_DBG_EN				12
+#define CRYPTO_DBG_SEL				7	/* bit 11:7 */
+#define CRYPTO_DBG_SEL_MASK			(0x1F << CRYPTO_DBG_SEL)
+#define CRYPTO_MASK_DOUT_INTR			6
+#define CRYPTO_MASK_DIN_INTR			5
+#define CRYPTO_MASK_AUTH_DONE_INTR		4
+#define CRYPTO_MASK_ERR_INTR			3
+#define CRYPTO_AUTO_SHUTDOWN_EN			2
+#define CRYPTO_CLK_EN_N				1
+#define CRYPTO_SW_RST				0
+
+/* seg_cfg reg */
+#define CRYPTO_F8_KEYSTREAM_ENABLE		25
+#define CRYPTO_F9_DIRECTION			24
+#define CRYPTO_F8_DIRECTION			23
+#define CRYPTO_USE_HW_KEY			22
+
+#define CRYPTO_CNTR_ALG				20	/* bit 21-20 */
+#define CRYPTO_CNTR_ALG_MASK			(3 << CRYPTO_CNTR_ALG)
+
+#define CRYPTO_CLR_CNTXT			19
+#define CRYPTO_LAST				18
+#define CRYPTO_FIRST				17
+#define CRYPTO_ENCODE				16
+
+#define CRYPTO_AUTH_POS				14	/* bit 15-14 */
+#define CRYPTO_AUTH_POS_MASK			(3 << CRYPTO_AUTH_POS)
+
+#define CRYPTO_AUTH_SIZE			11	/* bit 13-11 */
+#define CRYPTO_AUTH_SIZE_MASK			(7 << CRYPTO_AUTH_SIZE)
+
+#define CRYPTO_AUTH_ALG				9	/* bit 10-9 */
+#define CRYPTO_AUTH_ALG_MASK			(3 << CRYPTO_AUTH_ALG)
+
+#define CRYPTO_ENCR_MODE			6	/* bit 8-6 */
+#define CRYPTO_ENCR_MODE_MASK			(7 << CRYPTO_ENCR_MODE)
+
+#define CRYPTO_ENCR_KEY_SZ			3	/* bit 5-3 */
+#define CRYPTO_ENCR_KEY_SZ_MASK			(7 << CRYPTO_ENCR_KEY_SZ)
+
+#define CRYPTO_ENCR_ALG				0	/* bit 2-0 */
+#define CRYPTO_ENCR_ALG_MASK			(7 << CRYPTO_ENCR_ALG)
+
+#define CRYPTO_CNTR_ALG_NIST			0
+#define CRYPTO_CNTR_ALG_UMB			1
+#define CRYPTO_CNTR_ALG_VAR2			2
+
+#define CRYPTO_AUTH_POS_BEFORE			0
+#define CRYPTO_AUTH_POS_AFTER			1
+
+#define CRYPTO_AUTH_SIZE_SHA1			0
+#define CRYPTO_AUTH_SIZE_SHA256			1
+#define CRYPTO_AUTH_SIZE_SHA384			2
+#define CRYPTO_AUTH_SIZE_SHA512			3
+#define CRYPTO_AUTH_SIZE_HMAC_SHA1		4
+
+#define CRYPTO_AUTH_SIZE_UIA1			0
+#define CRYPTO_AUTH_SIZE_UIA2			1
+
+#define CRYPTO_AUTH_ALG_NONE			0
+#define CRYPTO_AUTH_ALG_SHA			1
+#define CRYPTO_AUTH_ALG_F9			2
+#define CRYPTO_AUTH_ALG_RESERVED1		3
+
+#define CRYPTO_ENCR_MODE_ECB			0
+#define CRYPTO_ENCR_MODE_CBC			1
+/* only valid when AES */
+#define CRYPTO_ENCR_MODE_CTR			2
+
+
+#define CRYPTO_ENCR_KEY_SZ_DES			0
+#define CRYPTO_ENCR_KEY_SZ_3DES			1
+
+#define CRYPTO_ENCR_KEY_SZ_AES128		0
+#define CRYPTO_ENCR_KEY_SZ_AES192		1
+#define CRYPTO_ENCR_KEY_SZ_AES256		2
+
+#define CRYPTO_ENCR_KEY_SZ_UEA1			0
+#define CRYPTO_ENCR_KEY_SZ_UEA2			1
+
+#define CRYPTO_ENCR_ALG_NONE			0
+#define CRYPTO_ENCR_ALG_DES			1
+#define CRYPTO_ENCR_ALG_AES			2
+#define CRYPTO_ENCR_ALG_C2			3
+#define CRYPTO_ENCR_ALG_F8			4
+
+/* encr_seg_cfg reg */
+#define CRYPTO_ENCR_SEG_SIZE			16	/* bit 31-16  */
+#define CRYPTO_ENCR_SEG_SIZE_MASK		(0xffff << CRYPTO_ENCR_SEG_SIZE)
+
+#define CRYPTO_ENCR_START			0
+#define CRYPTO_ENCR_START_MASK			(0xffff << CRYPTO_ENCR_START)
+
+/* auth_seg_cfg reg */
+#define CRYPTO_AUTH_SEG_SIZE			16	/* bit 31-16  */
+#define CRYPTO_AUTH_SEG_SIZE_MASK		(0xffff << CRYPTO_AUTH_SEG_SIZE)
+
+#define CRYPTO_AUTH_START			0
+#define CRYPTO_AUTH_START_MASK			(0xffff << CRYPTO_AUTH_START)
+
+
+/* seg_size reg */
+#define CRYPTO_SEG_SIZE				0
+#define CRYPTO_SEG_SIZE_MASK			(0xffff << CRYPTO_SEG_SIZE)
+
+/* goproc reg */
+#define CRYPTO_GO				0
+
+/* engines_avail */
+#define CRYPTO_F9_SEL				8
+#define CRYPTO_F8_SEL				7
+#define CRYPTO_HMAC_SEL				6
+#define CRYPTO_SHA512_SEL			5
+#define CRYPTO_SHA_SEL				4
+#define CRYPTO_DES_SEL				3
+#define CRYPTO_C2_SEL				2
+
+#define CRYPTO_AES_SEL				0	/* bit 1-0 */
+#define CRYPTO_AES_SEL_MASK			(3 <<  CRYPTO_AES_SEL)
+#define CRYPTO_AES_SEL_NO			0
+#define CRYPTO_AES_SEL_SLOW			1
+#define CRYPTO_AES_SEL_FAST			2
+#define CRYPTO_AES_SEL_RESERVED			3
+
+/*  F8 definition of CRYPTO_CNTR1_IV1_REG  */
+#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT		16	/* bit 31 - 16 */
+#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT_MASK \
+		(0xffff << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT)
+
+#define CRYPTO_CNTR1_IV1_REG_F8_BEARER		0	/* bit 4 - 0 */
+#define CRYPTO_CNTR1_IV1_REG_F8_BEARER_MASK \
+		(0x1f << CRYPTO_CNTR1_IV1_REG_F8_BEARER)
+
+/* F9 definition of CRYPTO_AUTH_IV4_REG */
+#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS	0	/* bit 2 - 0 */
+#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS_MASK \
+		(0x7  << CRYPTO_AUTH_IV4_REG_F9_VALID_BIS)
+
+/* misc  */
+#define CRYPTO_AES_RNDKEYS			60
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_30_H_ */
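
The shift/mask pairs above are meant to be OR'ed together into single register words. A hedged sketch of composing a CRYPTO_SEG_CFG_REG value for AES-128 CBC encryption with SHA-1 authentication positioned after the cipher; the field choices are illustrative, not a required configuration.

#include <linux/types.h>
#include "inc/qcryptohw_30.h"

static uint32_t seg_cfg_aes128_cbc_sha1(void)
{
	uint32_t cfg = 0;

	cfg |= (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) &
			CRYPTO_ENCR_ALG_MASK;
	cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) &
			CRYPTO_ENCR_KEY_SZ_MASK;
	cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE) &
			CRYPTO_ENCR_MODE_MASK;
	cfg |= 1 << CRYPTO_ENCODE;		/* encrypt, not decrypt */
	cfg |= (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) &
			CRYPTO_AUTH_ALG_MASK;
	cfg |= (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) &
			CRYPTO_AUTH_SIZE_MASK;
	cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS) &
			CRYPTO_AUTH_POS_MASK;
	return cfg;
}

The value would then be written to CRYPTO_SEG_CFG_REG (offset 0x30) before kicking off the operation through CRYPTO_GOPROC_REG.
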
diff --git a/drivers/crypto/msm/inc/qcryptohw_40.h b/drivers/crypto/msm/inc/qcryptohw_40.h
new file mode 100644
index 0000000..367bdaa
--- /dev/null
+++ b/drivers/crypto/msm/inc/qcryptohw_40.h
@@ -0,0 +1,316 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_40_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_40_H_
+
+
+#define QCE_AUTH_REG_BYTE_COUNT 4
+#define CRYPTO_VERSION_REG			0x0
+#define CRYPTO_DATA_IN_REG			0x008
+#define CRYPTO_DATA_OUT_REG			0x010
+#define CRYPTO_STATUS_REG			0x100
+#define CRYPTO_ENGINES_AVAIL			0x104
+#define CRYPTO3_VERSION_REG			0x108
+#define CRYPTO_SEG_SIZE_REG			0x200
+#define CRYPTO_GOPROC_REG			0x204
+#define CRYPTO_ENCR_SEG_CFG_REG			0x300
+
+#define CRYPTO_ENCR_SEG_SIZE_REG		0x304
+#define CRYPTO_ENCR_SEG_START_REG		0x308
+
+#define CRYPTO_ENCR_KEY0_REG			0x310
+#define CRYPTO_ENCR_KEY1_REG			0x314
+#define CRYPTO_ENCR_KEY2_REG			0x318
+#define CRYPTO_ENCR_KEY3_REG			0x31C
+#define CRYPTO_ENCR_KEY4_REG			0x320
+#define CRYPTO_ENCR_KEY5_REG			0x324
+#define CRYPTO_ENCR_KEY6_REG			0x328
+#define CRYPTO_ENCR_KEY7_REG			0x32C
+
+#define CRYPTO_ENCR_XTS_KEY0_REG		0x330
+#define CRYPTO_ENCR_XTS_KEY1_REG		0x334
+#define CRYPTO_ENCR_XTS_KEY2_REG		0x338
+#define CRYPTO_ENCR_XTS_KEY3_REG		0x33C
+#define CRYPTO_ENCR_XTS_KEY4_REG		0x340
+#define CRYPTO_ENCR_XTS_KEY5_REG		0x344
+#define CRYPTO_ENCR_XTS_KEY6_REG		0x348
+#define CRYPTO_ENCR_XTS_KEY7_REG		0x34C
+
+#define CRYPTO_CNTR0_IV0_REG			0x350
+#define CRYPTO_CNTR1_IV1_REG			0x354
+#define CRYPTO_CNTR2_IV2_REG			0x358
+#define CRYPTO_CNTR3_IV3_REG			0x35C
+
+#define CRYPTO_CNTR_MASK_REG			0x360
+
+#define CRYPTO_ENCR_XTS_DU_SIZE_REG		0x364
+
+#define CRYPTO_AUTH_SEG_CFG_REG			0x400
+#define CRYPTO_AUTH_SEG_SIZE_REG		0x404
+#define CRYPTO_AUTH_SEG_START_REG		0x408
+
+#define CRYPTO_AUTH_KEY0_REG			0x410
+#define CRYPTO_AUTH_KEY1_REG			0x414
+#define CRYPTO_AUTH_KEY2_REG			0x418
+#define CRYPTO_AUTH_KEY3_REG			0x41C
+#define CRYPTO_AUTH_KEY4_REG			0x420
+#define CRYPTO_AUTH_KEY5_REG			0x424
+#define CRYPTO_AUTH_KEY6_REG			0x428
+#define CRYPTO_AUTH_KEY7_REG			0x42C
+#define CRYPTO_AUTH_KEY8_REG			0x430
+#define CRYPTO_AUTH_KEY9_REG			0x434
+#define CRYPTO_AUTH_KEY10_REG			0x438
+#define CRYPTO_AUTH_KEY11_REG			0x43C
+#define CRYPTO_AUTH_KEY12_REG			0x440
+#define CRYPTO_AUTH_KEY13_REG			0x444
+#define CRYPTO_AUTH_KEY14_REG			0x448
+#define CRYPTO_AUTH_KEY15_REG			0x44C
+
+#define CRYPTO_AUTH_IV0_REG			0x450
+#define CRYPTO_AUTH_IV1_REG			0x454
+#define CRYPTO_AUTH_IV2_REG			0x458
+#define CRYPTO_AUTH_IV3_REG			0x45C
+#define CRYPTO_AUTH_IV4_REG			0x460
+#define CRYPTO_AUTH_IV5_REG			0x464
+#define CRYPTO_AUTH_IV6_REG			0x468
+#define CRYPTO_AUTH_IV7_REG			0x46C
+#define CRYPTO_AUTH_IV8_REG			0x470
+#define CRYPTO_AUTH_IV9_REG			0x474
+#define CRYPTO_AUTH_IV10_REG			0x478
+#define CRYPTO_AUTH_IV11_REG			0x47C
+#define CRYPTO_AUTH_IV12_REG			0x480
+#define CRYPTO_AUTH_IV13_REG			0x484
+#define CRYPTO_AUTH_IV14_REG			0x488
+#define CRYPTO_AUTH_IV15_REG			0x48C
+
+#define CRYPTO_AUTH_INFO_NONCE0_REG		0x490
+#define CRYPTO_AUTH_INFO_NONCE1_REG		0x494
+#define CRYPTO_AUTH_INFO_NONCE2_REG		0x498
+#define CRYPTO_AUTH_INFO_NONCE3_REG		0x49C
+
+#define CRYPTO_AUTH_BYTECNT0_REG		0x4A0
+#define CRYPTO_AUTH_BYTECNT1_REG		0x4A4
+#define CRYPTO_AUTH_BYTECNT2_REG		0x4A8
+#define CRYPTO_AUTH_BYTECNT3_REG		0x4AC
+
+#define CRYPTO_AUTH_EXP_MAC0_REG		0x4B0
+#define CRYPTO_AUTH_EXP_MAC1_REG		0x4B4
+#define CRYPTO_AUTH_EXP_MAC2_REG		0x4B8
+#define CRYPTO_AUTH_EXP_MAC3_REG		0x4BC
+#define CRYPTO_AUTH_EXP_MAC4_REG		0x4C0
+#define CRYPTO_AUTH_EXP_MAC5_REG		0x4C4
+#define CRYPTO_AUTH_EXP_MAC6_REG		0x4C8
+#define CRYPTO_AUTH_EXP_MAC7_REG		0x4CC
+
+#define CRYPTO_CONFIG_REG			0x500
+#define CRYPTO_SACR_REG				0x504
+#define CRYPTO_DEBUG_REG			0x508
+
+#define CRYPTO_DATA_SHADOW0			0x8000
+#define CRYPTO_DATA_SHADOW8191			0x8FFC
+
+
+/* Register bits */
+
+#define CRYPTO_CORE_MAJOR_REV			4 /* bit 7-4 */
+#define CRYPTO_CORE_MAJOR_REV_MASK		(0xF << CRYPTO_CORE_MAJOR_REV)
+#define CRYPTO_CORE_MINOR_REV			0 /* bit 3-0 */
+#define CRYPTO_CORE_MINOR_REV_MASK		(0xF << CRYPTO_CORE_MINOR_REV)
+#define CRYPTO_CORE_REV_MASK			0xFF
+
+/* status reg  */
+#define CRYPTO_MAC_FAILED			25
+#define CRYPTO_DOUT_SIZE_AVAIL			22 /* bit 24-22 */
+#define CRYPTO_DOUT_SIZE_AVAIL_MASK		(0x7 << CRYPTO_DOUT_SIZE_AVAIL)
+#define CRYPTO_DIN_SIZE_AVAIL			19 /* bit 21-19 */
+#define CRYPTO_DIN_SIZE_AVAIL_MASK		(0x7 << CRYPTO_DIN_SIZE_AVAIL)
+#define CRYPTO_ACCESS_VIOL			18
+#define CRYPTO_SEG_CHNG_ERR			17
+#define CRYPTO_CFH_CHNG_ERR			16
+#define CRYPTO_DOUT_ERR				15
+#define CRYPTO_DIN_ERR				14
+#define CRYPTO_LOCKED				13
+#define CRYPTO_CRYPTO_STATE			10 /* bit 12-10 */
+#define CRYPTO_CRYPTO_STATE_MASK		(0x7 << CRYPTO_CRYPTO_STATE)
+#define CRYPTO_ENCR_BUSY			9
+#define CRYPTO_AUTH_BUSY			8
+#define CRYPTO_DOUT_INTR			7
+#define CRYPTO_DIN_INTR				6
+#define CRYPTO_OP_DONE_INTR			5
+#define CRYPTO_ERR_INTR				4
+#define CRYPTO_DOUT_RDY				3
+#define CRYPTO_DIN_RDY				2
+#define CRYPTO_OPERATION_DONE			1
+#define CRYPTO_SW_ERR				0
+
+/* config reg */
+#define CRYPTO_REQ_SIZE				30 /* bit 31-30 */
+#define CRYPTO_REQ_SIZE_MASK			(0x3 << CRYPTO_REQ_SIZE)
+#define CRYPTO_REQ_SIZE_ENUM_16_BYTES	0
+#define CRYPTO_REQ_SIZE_ENUM_32_BYTES	1
+#define CRYPTO_REQ_SIZE_ENUM_64_BYTES	2
+
+#define CRYPTO_MAX_QUEUED_REQ			27 /* bit 29-27 */
+#define CRYPTO_MAX_QUEUED_REQ_MASK		(0x7 << CRYPTO_MAX_QUEUED_REQ)
+#define CRYPTO_ENUM1_QUEUED_REQS		0
+#define CRYPTO_ENUM2_QUEUED_REQS		1
+#define CRYPTO_ENUM3_QUEUED_REQS		2
+#define CRYPTO_ENUM4_QUEUED_REQS		3
+
+#define CRYPTO_FIFO_THRESHOLD			24 /* bit 26-24 */
+#define CRYPTO_FIFO_THRESHOLD_MASK		(0x7 << CRYPTO_FIFO_THRESHOLD)
+#define CRYPTO_FIFO_ENUM_16_BYTES		0
+#define CRYPTO_FIFO_ENUM_32_BYTES		1
+#define CRYPTO_FIFO_ENUM_48_BYTES		2
+#define CRYPTO_FIFO_ENUM_64_BYTES		3
+
+#define CRYPTO_IRQ_ENABLES			20	/* bit 23-20 */
+#define CRYPTO_IRQ_ENABLES_MASK			(0xF << CRYPTO_IRQ_ENABLES)
+
+#define CRYPTO_ACR_EN				18
+#define CRYPTO_BAM_MODE				17
+#define CRYPTO_LITTLE_ENDIAN_MODE		16
+#define CRYPTO_HIGH_SPD_OUT_EN_N		14
+#define CRYPTO_HIGH_SPD_IN_EN_N			13
+#define CRYPTO_DBG_EN				12
+
+#define CRYPTO_DBG_SEL				7 /* bit 11:7 */
+#define CRYPTO_DBG_SEL_MASK			(0x1F << CRYPTO_DBG_SEL)
+
+#define CRYPTO_MASK_DOUT_INTR			6
+#define CRYPTO_MASK_DIN_INTR			5
+#define CRYPTO_MASK_OP_DONE_INTR		4
+#define CRYPTO_MASK_ERR_INTR			3
+#define CRYPTO_AUTO_SHUTDOWN_EN			2
+#define CRYPTO_CLK_EN_N				1
+
+/* auth_seg_cfg reg */
+#define CRYPTO_COMP_EXP_MAC			20
+#define CRYPTO_COMP_EXP_MAC_DISABLED		0
+#define CRYPTO_COMP_EXP_MAC_ENABLED		1
+
+#define CRYPTO_F9_DIRECTION			19
+#define CRYPTO_F9_DIRECTION_UPLINK		0
+#define CRYPTO_F9_DIRECTION_DOWNLINK		1
+
+#define CRYPTO_AUTH_NONCE_NUM_WORDS		16
+#define CRYPTO_AUTH_NONCE_NUM_WORDS_MASK \
+					(0x7 << CRYPTO_AUTH_NONCE_NUM_WORDS)
+
+#define CRYPTO_USE_HW_KEY_AUTH			15
+
+#define CRYPTO_LAST				14
+
+#define CRYPTO_AUTH_POS				12 /* bit 13 .. 12*/
+#define CRYPTO_AUTH_POS_MASK			(0x3 << CRYPTO_AUTH_POS)
+#define CRYPTO_AUTH_POS_BEFORE			0
+#define CRYPTO_AUTH_POS_AFTER			1
+
+#define CRYPTO_AUTH_SIZE			9 /* bits 11 .. 9*/
+#define CRYPTO_AUTH_SIZE_MASK			(0x7 << CRYPTO_AUTH_SIZE)
+#define CRYPTO_AUTH_SIZE_SHA1			0
+#define CRYPTO_AUTH_SIZE_SHA256			1
+#define CRYPTO_AUTH_SIZE_ENUM_4_BYTES		0
+#define CRYPTO_AUTH_SIZE_ENUM_6_BYTES		1
+#define CRYPTO_AUTH_SIZE_ENUM_8_BYTES		2
+#define CRYPTO_AUTH_SIZE_ENUM_10_BYTES		3
+#define CRYPTO_AUTH_SIZE_ENUM_12_BYTES		4
+#define CRYPTO_AUTH_SIZE_ENUM_14_BYTES		5
+#define CRYPTO_AUTH_SIZE_ENUM_16_BYTES		6
+
+#define CRYPTO_AUTH_MODE			6 /* bit 8 .. 6*/
+#define CRYPTO_AUTH_MODE_MASK			(0x7 << CRYPTO_AUTH_MODE)
+#define CRYPTO_AUTH_MODE_HASH			0
+#define CRYPTO_AUTH_MODE_HMAC			1
+#define CRYPTO_AUTH_MODE_CCM			0
+#define CRYPTO_AUTH_MODE_CMAC			1
+
+#define CRYPTO_AUTH_KEY_SIZE			3
+#define CRYPTO_AUTH_KEY_SIZE_MASK		(0x7 << CRYPTO_AUTH_KEY_SIZE)
+#define CRYPTO_AUTH_KEY_SZ_AES128		0
+#define CRYPTO_AUTH_KEY_SZ_AES256		2
+
+#define CRYPTO_AUTH_ALG				0 /* bit 2 .. 0*/
+#define CRYPTO_AUTH_ALG_MASK			7
+#define CRYPTO_AUTH_ALG_NONE			0
+#define CRYPTO_AUTH_ALG_SHA			1
+#define CRYPTO_AUTH_ALG_AES			2
+#define CRYPTO_AUTH_ALG_KASUMI			3
+#define CRYPTO_AUTH_ALG_SNOW3G			4
+
+/* encr_xts_du_size reg */
+#define CRYPTO_ENCR_XTS_DU_SIZE			0 /* bit 19-0  */
+#define CRYPTO_ENCR_XTS_DU_SIZE_MASK		0xfffff
+
+/* encr_seg_cfg reg */
+#define CRYPTO_F8_KEYSTREAM_ENABLE		15
+#define CRYPTO_F8_KEYSTREAM_DISABLED		0
+#define CRYPTO_F8_KEYSTREAM_ENABLED		1
+
+#define CRYPTO_F8_DIRECTION			14
+#define CRYPTO_F8_DIRECTION_UPLINK		0
+#define CRYPTO_F8_DIRECTION_DOWNLINK		1
+
+#define CRYPTO_USE_HW_KEY_ENCR			13
+#define CRYPTO_USE_HW_KEY_REG			0
+#define CRYPTO_USE_HW_KEY			1
+
+#define CRYPTO_CNTR_ALG				11 /* bit 12-11 */
+#define CRYPTO_CNTR_ALG_MASK			(3 << CRYPTO_CNTR_ALG)
+#define CRYPTO_CNTR_ALG_NIST			0
+
+#define CRYPTO_ENCODE				10
+
+#define CRYPTO_ENCR_MODE			6 /* bit 9-6 */
+#define CRYPTO_ENCR_MODE_MASK			(0xF << CRYPTO_ENCR_MODE)
+/* only valid when AES */
+#define CRYPTO_ENCR_MODE_ECB			0
+#define CRYPTO_ENCR_MODE_CBC			1
+#define CRYPTO_ENCR_MODE_CTR			2
+#define CRYPTO_ENCR_MODE_XTS			3
+#define CRYPTO_ENCR_MODE_CCM			4
+
+#define CRYPTO_ENCR_KEY_SZ			3 /* bit 5-3 */
+#define CRYPTO_ENCR_KEY_SZ_MASK			(7 << CRYPTO_ENCR_KEY_SZ)
+#define CRYPTO_ENCR_KEY_SZ_DES			0
+#define CRYPTO_ENCR_KEY_SZ_3DES			1
+#define CRYPTO_ENCR_KEY_SZ_AES128		0
+#define CRYPTO_ENCR_KEY_SZ_AES256		2
+#define CRYPTO_ENCR_KEY_SZ_UEA1			0
+#define CRYPTO_ENCR_KEY_SZ_UEA2			1
+
+#define CRYPTO_ENCR_ALG				0 /* bit 2-0 */
+#define CRYPTO_ENCR_ALG_MASK			(7 << CRYPTO_ENCR_ALG)
+#define CRYPTO_ENCR_ALG_NONE			0
+#define CRYPTO_ENCR_ALG_DES			1
+#define CRYPTO_ENCR_ALG_AES			2
+#define CRYPTO_ENCR_ALG_KASUMI			3
+#define CRYPTO_ENCR_ALG_SNOW_3G			5
+
+/* goproc reg */
+#define CRYPTO_GO				0
+#define CRYPTO_CLR_CNTXT			1
+
+/* engines_avail */
+#define CRYPTO_ENCR_AES_SEL			0
+#define CRYPTO_DES_SEL				3
+#define CRYPTO_ENCR_SNOW3G_SEL			4
+#define CRYPTO_ENCR_KASUMI_SEL			5
+#define CRYPTO_SHA_SEL				6
+#define CRYPTO_SHA512_SEL			7
+#define CRYPTO_AUTH_AES_SEL			8
+#define CRYPTO_AUTH_SNOW3G_SEL			9
+#define CRYPTO_AUTH_KASUMI_SEL			10
+#define CRYPTO_BAM_SEL				11
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_40_H_ */
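
Unlike the 3.x header, this revision exposes a version register; a hedged sketch of decoding it with the rev masks above (iobase is assumed to be the ioremap'ed CE register base):

#include <linux/io.h>
#include <linux/kernel.h>
#include "inc/qcryptohw_40.h"

static void qce40_log_core_rev(void __iomem *iobase)
{
	uint32_t v = readl_relaxed(iobase + CRYPTO_VERSION_REG);
	uint32_t major = (v & CRYPTO_CORE_MAJOR_REV_MASK) >>
				CRYPTO_CORE_MAJOR_REV;
	uint32_t minor = (v & CRYPTO_CORE_MINOR_REV_MASK) >>
				CRYPTO_CORE_MINOR_REV;

	pr_info("qce: crypto core rev %u.%u\n", major, minor);
}
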
diff --git a/drivers/crypto/msm/ota_crypto.c b/drivers/crypto/msm/ota_crypto.c
new file mode 100644
index 0000000..516253a
--- /dev/null
+++ b/drivers/crypto/msm/ota_crypto.c
@@ -0,0 +1,731 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* Qualcomm Over the Air (OTA) Crypto driver */
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+
+
+#include <linux/qcota.h>
+#include "inc/qce_ota.h"
+#include "inc/qce.h"
+
+enum qce_ota_oper_enum {
+	QCE_OTA_F8_OPER   = 0,
+	QCE_OTA_MPKT_F8_OPER = 1,
+	QCE_OTA_F9_OPER  = 2,
+	QCE_OTA_OPER_LAST
+};
+
+struct ota_dev_control;
+
+struct ota_async_req {
+	struct list_head list;
+	struct completion complete;
+	int err;
+	enum qce_ota_oper_enum op;
+	union {
+		struct qce_f9_req f9_req;
+		struct qce_f8_req f8_req;
+		struct qce_f8_multi_pkt_req f8_mp_req;
+	} req;
+
+	struct ota_dev_control  *podev;
+};
+
+/*
+ * Register ourselves as a misc device to be able to access the OTA
+ * crypto engine from userspace.
+ */
+
+
+#define QCOTA_DEV	"qcota"
+
+
+struct ota_dev_control {
+
+	/* misc device */
+	struct miscdevice miscdevice;
+
+	/* qce handle */
+	void *qce;
+
+	/* platform device */
+	struct platform_device *pdev;
+
+	unsigned magic;
+
+	struct list_head ready_commands;
+	struct ota_async_req *active_command;
+	spinlock_t lock;
+	struct tasklet_struct done_tasklet;
+};
+
+#define OTA_MAGIC 0x4f544143
+
+static long qcota_ioctl(struct file *file,
+			  unsigned cmd, unsigned long arg);
+static int qcota_open(struct inode *inode, struct file *file);
+static int qcota_release(struct inode *inode, struct file *file);
+static int start_req(struct ota_dev_control *podev);
+
+static const struct file_operations qcota_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qcota_ioctl,
+	.open = qcota_open,
+	.release = qcota_release,
+};
+
+static struct ota_dev_control qcota_dev[] = {
+	{
+		.miscdevice = {
+			.minor = MISC_DYNAMIC_MINOR,
+			.name = "qcota0",
+			.fops = &qcota_fops,
+		},
+		.magic = OTA_MAGIC,
+	},
+	{
+		.miscdevice = {
+			.minor = MISC_DYNAMIC_MINOR,
+			.name = "qcota1",
+			.fops = &qcota_fops,
+		},
+		.magic = OTA_MAGIC,
+	},
+	{
+		.miscdevice = {
+			.minor = MISC_DYNAMIC_MINOR,
+			.name = "qcota2",
+			.fops = &qcota_fops,
+		},
+		.magic = OTA_MAGIC,
+	}
+};
+
+#define MAX_OTA_DEVICE ARRAY_SIZE(qcota_dev)
+
+#define DEBUG_MAX_FNAME  16
+#define DEBUG_MAX_RW_BUF 1024
+
+struct qcota_stat {
+	u32 f8_req;
+	u32 f8_mp_req;
+	u32 f9_req;
+	u32 f8_op_success;
+	u32 f8_op_fail;
+	u32 f8_mp_op_success;
+	u32 f8_mp_op_fail;
+	u32 f9_op_success;
+	u32 f9_op_fail;
+};
+static struct qcota_stat _qcota_stat[MAX_OTA_DEVICE];
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static int _debug_qcota[MAX_OTA_DEVICE];
+
+static struct ota_dev_control *qcota_minor_to_control(unsigned n)
+{
+	int i;
+
+	for (i = 0; i < MAX_OTA_DEVICE; i++) {
+		if (qcota_dev[i].miscdevice.minor == n)
+			return &qcota_dev[i];
+	}
+	return NULL;
+}
+
+static int qcota_open(struct inode *inode, struct file *file)
+{
+	struct ota_dev_control *podev;
+
+	podev = qcota_minor_to_control(MINOR(inode->i_rdev));
+	if (podev == NULL) {
+		pr_err("%s: no such device %d\n", __func__,
+				MINOR(inode->i_rdev));
+		return -ENOENT;
+	}
+
+	file->private_data = podev;
+
+	return 0;
+}
+
+static int qcota_release(struct inode *inode, struct file *file)
+{
+	struct ota_dev_control *podev;
+
+	podev =  file->private_data;
+
+	if (podev != NULL && podev->magic != OTA_MAGIC) {
+		pr_err("%s: invalid handle %p\n",
+			__func__, podev);
+	}
+
+	file->private_data = NULL;
+
+	return 0;
+}
+
+static void req_done(unsigned long data)
+{
+	struct ota_dev_control *podev = (struct ota_dev_control *)data;
+	struct ota_async_req *areq;
+	unsigned long flags;
+	struct ota_async_req *new_req = NULL;
+	int ret = 0;
+
+	spin_lock_irqsave(&podev->lock, flags);
+	areq = podev->active_command;
+	podev->active_command = NULL;
+
+again:
+	if (!list_empty(&podev->ready_commands)) {
+		new_req = container_of(podev->ready_commands.next,
+						struct ota_async_req, list);
+		list_del(&new_req->list);
+		podev->active_command = new_req;
+		new_req->err = 0;
+		ret = start_req(podev);
+	}
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	if (areq)
+		complete(&areq->complete);
+
+	if (new_req && ret) {
+		complete(&new_req->complete);
+		spin_lock_irqsave(&podev->lock, flags);
+		podev->active_command = NULL;
+		areq = NULL;
+		ret = 0;
+		new_req = NULL;
+		goto again;
+	}
+
+	return;
+}
+
+static void f9_cb(void *cookie, unsigned char *icv, unsigned char *iv,
+	int ret)
+{
+	struct ota_async_req *areq = (struct ota_async_req *) cookie;
+	struct ota_dev_control *podev;
+	struct qcota_stat *pstat;
+
+	podev = areq->podev;
+	pstat = &_qcota_stat[podev->pdev->id];
+	areq->req.f9_req.mac_i = (uint32_t) icv;
+
+	if (ret)
+		areq->err = -ENXIO;
+	else
+		areq->err = 0;
+
+	tasklet_schedule(&podev->done_tasklet);
+}
+
+static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv,
+	int ret)
+{
+	struct ota_async_req *areq = (struct ota_async_req *) cookie;
+	struct ota_dev_control *podev;
+	struct qcota_stat *pstat;
+
+	podev = areq->podev;
+	pstat = &_qcota_stat[podev->pdev->id];
+
+	if (ret)
+		areq->err = -ENXIO;
+	else
+		areq->err = 0;
+
+	tasklet_schedule(&podev->done_tasklet);
+}
+
+static int start_req(struct ota_dev_control *podev)
+{
+	struct ota_async_req *areq;
+	struct qce_f9_req *pf9;
+	struct qce_f8_multi_pkt_req *p_mp_f8;
+	struct qce_f8_req *pf8;
+	int ret = 0;
+
+	/* start the command on the podev->active_command */
+	areq = podev->active_command;
+	areq->podev = podev;
+
+	switch (areq->op) {
+	case QCE_OTA_F8_OPER:
+		pf8 = &areq->req.f8_req;
+		ret = qce_f8_req(podev->qce, pf8, areq, f8_cb);
+		break;
+	case QCE_OTA_MPKT_F8_OPER:
+		p_mp_f8 = &areq->req.f8_mp_req;
+		ret = qce_f8_multi_pkt_req(podev->qce, p_mp_f8, areq, f8_cb);
+		break;
+
+	case QCE_OTA_F9_OPER:
+		pf9 = &areq->req.f9_req;
+		ret = qce_f9_req(podev->qce, pf9, areq, f9_cb);
+		break;
+
+	default:
+		ret = -ENOTSUPP;
+		break;
+	}
+	areq->err = ret;
+	return ret;
+}
+
+static int submit_req(struct ota_async_req *areq, struct ota_dev_control *podev)
+{
+	unsigned long flags;
+	int ret = 0;
+	struct qcota_stat *pstat;
+
+	areq->err = 0;
+	spin_lock_irqsave(&podev->lock, flags);
+	if (podev->active_command == NULL) {
+		podev->active_command = areq;
+		ret = start_req(podev);
+	} else {
+		list_add_tail(&areq->list, &podev->ready_commands);
+	}
+
+	if (ret != 0)
+		podev->active_command = NULL;
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	if (ret == 0)
+		wait_for_completion(&areq->complete);
+
+	pstat = &_qcota_stat[podev->pdev->id];
+	switch (areq->op) {
+	case QCE_OTA_F8_OPER:
+		if (areq->err)
+			pstat->f8_op_fail++;
+		else
+			pstat->f8_op_success++;
+		break;
+
+	case QCE_OTA_MPKT_F8_OPER:
+
+		if (areq->err)
+			pstat->f8_mp_op_fail++;
+		else
+			pstat->f8_mp_op_success++;
+		break;
+
+	case QCE_OTA_F9_OPER:
+	default:
+		if (areq->err)
+			pstat->f9_op_fail++;
+		else
+			pstat->f9_op_success++;
+		break;
+	}
+
+	return areq->err;
+}
+
+static long qcota_ioctl(struct file *file,
+			  unsigned cmd, unsigned long arg)
+{
+	int err = 0;
+	struct ota_dev_control *podev;
+	uint8_t *user_src;
+	uint8_t *user_dst;
+	uint8_t *k_buf = NULL;
+	struct ota_async_req areq;
+	uint32_t total;
+	struct qcota_stat *pstat;
+
+	podev =  file->private_data;
+	if (podev == NULL || podev->magic != OTA_MAGIC) {
+		pr_err("%s: invalid handle %p\n",
+			__func__, podev);
+		return -ENOENT;
+	}
+
+	/* Verify user arguments. */
+	if (_IOC_TYPE(cmd) != QCOTA_IOC_MAGIC)
+		return -ENOTTY;
+
+	init_completion(&areq.complete);
+
+	pstat = &_qcota_stat[podev->pdev->id];
+
+	switch (cmd) {
+	case QCOTA_F9_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			       sizeof(struct qce_f9_req)))
+			return -EFAULT;
+		if (__copy_from_user(&areq.req.f9_req, (void __user *)arg,
+				     sizeof(struct qce_f9_req)))
+			return -EFAULT;
+
+		user_src = areq.req.f9_req.message;
+		if (!access_ok(VERIFY_READ, (void __user *)user_src,
+			       areq.req.f9_req.msize))
+			return -EFAULT;
+
+		k_buf = kmalloc(areq.req.f9_req.msize, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+
+		if (__copy_from_user(k_buf, (void __user *)user_src,
+				areq.req.f9_req.msize)) {
+			kfree(k_buf);
+			return -EFAULT;
+		}
+
+		areq.req.f9_req.message = k_buf;
+		areq.op = QCE_OTA_F9_OPER;
+
+		pstat->f9_req++;
+		err = submit_req(&areq, podev);
+
+		areq.req.f9_req.message = user_src;
+		if (err == 0 && __copy_to_user((void __user *)arg,
+				&areq.req.f9_req, sizeof(struct qce_f9_req))) {
+			err = -EFAULT;
+		}
+		kfree(k_buf);
+		break;
+
+	case QCOTA_F8_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			       sizeof(struct qce_f8_req)))
+			return -EFAULT;
+		if (__copy_from_user(&areq.req.f8_req, (void __user *)arg,
+				     sizeof(struct qce_f8_req)))
+			return -EFAULT;
+		total = areq.req.f8_req.data_len;
+		user_src = areq.req.f8_req.data_in;
+		if (user_src != NULL) {
+			if (!access_ok(VERIFY_READ, (void __user *)
+					user_src, total))
+				return -EFAULT;
+		}
+
+		user_dst = areq.req.f8_req.data_out;
+		if (!access_ok(VERIFY_WRITE, (void __user *)
+				user_dst, total))
+			return -EFAULT;
+
+		k_buf = kmalloc(total, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+
+		/* k_buf returned from kmalloc should be cache line aligned */
+		if (user_src && __copy_from_user(k_buf,
+				(void __user *)user_src, total)) {
+			kfree(k_buf);
+			return -EFAULT;
+		}
+
+		if (user_src)
+			areq.req.f8_req.data_in = k_buf;
+		else
+			areq.req.f8_req.data_in = NULL;
+		areq.req.f8_req.data_out = k_buf;
+
+		areq.op = QCE_OTA_F8_OPER;
+
+		pstat->f8_req++;
+		err = submit_req(&areq, podev);
+
+		if (err == 0 && __copy_to_user(user_dst, k_buf, total))
+			err = -EFAULT;
+		kfree(k_buf);
+
+		break;
+
+	case QCOTA_F8_MPKT_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			       sizeof(struct qce_f8_multi_pkt_req)))
+			return -EFAULT;
+		if (__copy_from_user(&areq.req.f8_mp_req, (void __user *)arg,
+				     sizeof(struct qce_f8_multi_pkt_req)))
+			return -EFAULT;
+
+		total = areq.req.f8_mp_req.num_pkt *
+				areq.req.f8_mp_req.qce_f8_req.data_len;
+
+		user_src = areq.req.f8_mp_req.qce_f8_req.data_in;
+		if (!access_ok(VERIFY_READ, (void __user *)
+				user_src, total))
+			return -EFAULT;
+
+		user_dst = areq.req.f8_mp_req.qce_f8_req.data_out;
+		if (!access_ok(VERIFY_WRITE, (void __user *)
+				user_dst, total))
+			return -EFAULT;
+
+		k_buf = kmalloc(total, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+		/* k_buf returned from kmalloc should be cache line aligned */
+		if (__copy_from_user(k_buf, (void __user *)user_src, total)) {
+			kfree(k_buf);
+
+			return -EFAULT;
+		}
+
+		areq.req.f8_mp_req.qce_f8_req.data_out = k_buf;
+		areq.req.f8_mp_req.qce_f8_req.data_in = k_buf;
+
+		areq.op = QCE_OTA_MPKT_F8_OPER;
+
+		pstat->f8_mp_req++;
+		err = submit_req(&areq, podev);
+
+		if (err == 0 && __copy_to_user(user_dst, k_buf, total))
+			err = -EFAULT;
+		kfree(k_buf);
+		break;
+
+	default:
+		return -ENOTTY;
+	}
+
+	return err;
+}
+
+static int qcota_probe(struct platform_device *pdev)
+{
+	void *handle = NULL;
+	int rc = 0;
+	struct ota_dev_control *podev;
+	struct ce_hw_support ce_support;
+
+	if (pdev->id >= MAX_OTA_DEVICE) {
+		pr_err("%s: device id %d exceeds allowed %d\n",
+			__func__, pdev->id, MAX_OTA_DEVICE);
+		return -ENOENT;
+	}
+
+	podev = &qcota_dev[pdev->id];
+
+	INIT_LIST_HEAD(&podev->ready_commands);
+	podev->active_command = NULL;
+	spin_lock_init(&podev->lock);
+	tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);
+
+	/* open qce */
+	handle = qce_open(pdev, &rc);
+	if (handle == NULL) {
+		pr_err("%s: device id %d, can not open qce\n",
+			__func__, pdev->id);
+		platform_set_drvdata(pdev, NULL);
+		return rc;
+	}
+	if (qce_hw_support(handle, &ce_support) < 0 ||
+					ce_support.ota == false) {
+		pr_err("%s: device id %d, qce does not support ota capability\n",
+			__func__, pdev->id);
+		rc = -ENODEV;
+		goto err;
+	}
+	podev->qce = handle;
+	podev->pdev = pdev;
+	platform_set_drvdata(pdev, podev);
+
+	rc = misc_register(&podev->miscdevice);
+	if (rc < 0)
+		goto err;
+
+	return 0;
+err:
+	if (handle)
+		qce_close(handle);
+	platform_set_drvdata(pdev, NULL);
+	podev->qce = NULL;
+	podev->pdev = NULL;
+	return rc;
+}
+
+static int qcota_remove(struct platform_device *pdev)
+{
+	struct ota_dev_control *podev;
+
+	podev = platform_get_drvdata(pdev);
+	if (!podev)
+		return 0;
+	if (podev->qce)
+		qce_close(podev->qce);
+
+	if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
+		misc_deregister(&podev->miscdevice);
+	tasklet_kill(&podev->done_tasklet);
+	return 0;
+}
+
+static struct platform_driver qcota_plat_driver = {
+	.probe = qcota_probe,
+	.remove = qcota_remove,
+	.driver = {
+		.name = "qcota",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int _disp_stats(int id)
+{
+	struct qcota_stat *pstat;
+	int len = 0;
+
+	pstat = &_qcota_stat[id];
+	len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQualcomm OTA crypto accelerator %d Statistics:\n",
+				id + 1);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 request             : %d\n",
+					pstat->f8_req);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 operation success   : %d\n",
+					pstat->f8_op_success);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 operation fail      : %d\n",
+					pstat->f8_op_fail);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 MP request          : %d\n",
+					pstat->f8_mp_req);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 MP operation success: %d\n",
+					pstat->f8_mp_op_success);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 MP operation fail   : %d\n",
+					pstat->f8_mp_op_fail);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F9 request             : %d\n",
+					pstat->f9_req);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F9 operation success   : %d\n",
+					pstat->f9_op_success);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F9 operation fail      : %d\n",
+					pstat->f9_op_fail);
+
+	return len;
+}
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int rc = -EINVAL;
+	int qcota = *((int *) file->private_data);
+	int len;
+
+	len = _disp_stats(qcota);
+
+	rc = simple_read_from_buffer((void __user *) buf, count,
+			ppos, (void *) _debug_read_buf, len);
+
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+
+	int qcota = *((int *) file->private_data);
+
+	memset((char *)&_qcota_stat[qcota], 0, sizeof(struct qcota_stat));
+	return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+	.open =         _debug_stats_open,
+	.read =         _debug_stats_read,
+	.write =        _debug_stats_write,
+};
+
+static int _qcota_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	int i;
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcota", NULL);
+	if (IS_ERR(_debug_dent)) {
+		pr_err("qcota debugfs_create_dir fail, error %ld\n",
+				PTR_ERR(_debug_dent));
+		return PTR_ERR(_debug_dent);
+	}
+
+	for (i = 0; i < MAX_OTA_DEVICE; i++) {
+		snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", i+1);
+		_debug_qcota[i] = i;
+		dent = debugfs_create_file(name, 0644, _debug_dent,
+				&_debug_qcota[i], &_debug_stats_ops);
+		if (dent == NULL) {
+			pr_err("qcota debugfs_create_file fail\n");
+			rc = -ENOMEM;
+			goto err;
+		}
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int __init qcota_init(void)
+{
+	int rc;
+
+	rc = _qcota_debug_init();
+	if (rc)
+		return rc;
+	return platform_driver_register(&qcota_plat_driver);
+}
+static void __exit qcota_exit(void)
+{
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&qcota_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rohit Vaswani <rvaswani@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm Ota Crypto driver");
+MODULE_VERSION("1.01");
+
+module_init(qcota_init);
+module_exit(qcota_exit);
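
A hedged user-space sketch of the qcota ioctl path implemented above. The node name /dev/qcota0 follows the miscdevice name registered in this file; only the qce_f8_req members the driver itself touches (data_in, data_out, data_len) are set here, and the remaining fields (key, count, bearer, direction) would be filled in from the linux/qcota.h definitions, which are not part of this change.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/qcota.h>

int ota_f8_cipher(uint8_t *in, uint8_t *out, uint32_t len)
{
	struct qce_f8_req req;
	int fd = open("/dev/qcota0", O_RDWR);

	if (fd < 0)
		return -1;
	memset(&req, 0, sizeof(req));
	req.data_in = in;
	req.data_out = out;
	req.data_len = len;
	/* key/count-c/bearer/direction fields omitted; see linux/qcota.h */
	if (ioctl(fd, QCOTA_F8_REQ, &req) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
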
diff --git a/drivers/crypto/msm/qce.c b/drivers/crypto/msm/qce.c
new file mode 100644
index 0000000..b945d24
--- /dev/null
+++ b/drivers/crypto/msm/qce.c
@@ -0,0 +1,2607 @@
+/* Qualcomm Crypto Engine driver.
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+
+#include <linux/qcota.h>
+#include <mach/dma.h>
+
+#include "inc/qce.h"
+#include "inc/qcedev.h"
+#include "inc/qcryptohw_30.h"
+#include "inc/qce_ota.h"
+
+/* ADM definitions */
+#define LI_SG_CMD  (1 << 31)    /* last index in the scatter gather cmd */
+#define SRC_INDEX_SG_CMD(index) ((index & 0x3fff) << 16)
+#define DST_INDEX_SG_CMD(index) (index & 0x3fff)
+#define ADM_DESC_LAST  (1 << 31)
+
+/* Data xfer between DM and CE in blocks of 16 bytes */
+#define ADM_CE_BLOCK_SIZE  16
+
+/* Data xfer between DM and CE in blocks of 64 bytes */
+#define ADM_SHA_BLOCK_SIZE  64
+
+#define ADM_DESC_LENGTH_MASK 0xffff
+#define ADM_DESC_LENGTH(x)  (x & ADM_DESC_LENGTH_MASK)
+
+struct dmov_desc {
+	uint32_t addr;
+	uint32_t len;
+};
+
+#define ADM_STATUS_OK 0x80000002
+
+/* Misc definitions */
+
+/* QCE max number of descriptor in a descriptor list */
+#define QCE_MAX_NUM_DESC    128
+
+/* State of DM channel */
+enum qce_chan_st_enum {
+	QCE_CHAN_STATE_IDLE = 0,
+	QCE_CHAN_STATE_IN_PROG = 1,
+	QCE_CHAN_STATE_COMP = 2,
+	QCE_CHAN_STATE_LAST
+};
+
+/*
+ * CE HW device structure.
+ * Each engine has an instance of the structure.
+ * Each engine can only handle one crypto operation at a time. It is up to
+ * the software above to ensure that operations on an engine are
+ * single-threaded.
+ */
+struct qce_device {
+	struct device *pdev;        /* Handle to platform_device structure */
+	unsigned char *coh_vmem;    /* Allocated coherent virtual memory */
+	dma_addr_t coh_pmem;	    /* Allocated coherent physical memory */
+	void __iomem *iobase;	    /* Virtual io base of CE HW  */
+	unsigned int phy_iobase;    /* Physical io base of CE HW    */
+	struct clk *ce_clk;	    /* Handle to CE clk */
+	unsigned int crci_in;	      /* CRCI for CE DM IN Channel   */
+	unsigned int crci_out;	      /* CRCI for CE DM OUT Channel   */
+	unsigned int crci_hash;	      /* CRCI for CE HASH   */
+	unsigned int chan_ce_in;      /* ADM channel used for CE input
+					* and auth result if authentication
+					* only operation. */
+	unsigned int chan_ce_out;     /* ADM channel used for CE output,
+					* and icv for esp */
+
+	unsigned int *cmd_pointer_list_ce_in;
+	dma_addr_t  phy_cmd_pointer_list_ce_in;
+
+	unsigned int *cmd_pointer_list_ce_out;
+	dma_addr_t  phy_cmd_pointer_list_ce_out;
+
+	unsigned char *cmd_list_ce_in;
+	dma_addr_t  phy_cmd_list_ce_in;
+
+	unsigned char *cmd_list_ce_out;
+	dma_addr_t  phy_cmd_list_ce_out;
+
+	struct dmov_desc *ce_out_src_desc;
+	dma_addr_t  phy_ce_out_src_desc;
+
+	struct dmov_desc *ce_out_dst_desc;
+	dma_addr_t  phy_ce_out_dst_desc;
+
+	struct dmov_desc *ce_in_src_desc;
+	dma_addr_t  phy_ce_in_src_desc;
+
+	struct dmov_desc *ce_in_dst_desc;
+	dma_addr_t  phy_ce_in_dst_desc;
+
+	unsigned char *ce_out_ignore;
+	dma_addr_t phy_ce_out_ignore;
+
+	unsigned char *ce_pad;
+	dma_addr_t phy_ce_pad;
+
+	struct msm_dmov_cmd  *chan_ce_in_cmd;
+	struct msm_dmov_cmd  *chan_ce_out_cmd;
+
+	uint32_t ce_out_ignore_size;
+
+	int ce_out_dst_desc_index;
+	int ce_in_src_desc_index;
+
+	enum qce_chan_st_enum chan_ce_in_state;		/* chan ce_in state */
+	enum qce_chan_st_enum chan_ce_out_state;	/* chan ce_out state */
+
+	int chan_ce_in_status;		/* chan ce_in status      */
+	int chan_ce_out_status;		/* chan ce_out status */
+
+	unsigned char *dig_result;
+	dma_addr_t phy_dig_result;
+
+	/* cached aes key */
+	uint32_t aeskey[AES256_KEY_SIZE/sizeof(uint32_t)];
+
+	uint32_t aes_key_size;		/* cached aes key size in bytes */
+	int fastaes;			/* ce supports fast aes */
+	int hmac;			/* ce supports hmac-sha1 */
+	bool ota;			/* ce supports ota */
+
+	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */
+
+	int assoc_nents;
+	int src_nents;
+	int dst_nents;
+
+	void *areq;
+	enum qce_cipher_mode_enum mode;
+
+	dma_addr_t phy_iv_in;
+	dma_addr_t phy_ota_src;
+	dma_addr_t phy_ota_dst;
+	unsigned int ota_size;
+	int err;
+};
+
+/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
+static uint32_t  _std_init_vector_sha1[] =   {
+	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
+};
+/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint32_t _std_init_vector_sha256[] = {
+	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
+	0x510E527F, 0x9B05688C,	0x1F83D9AB, 0x5BE0CD19
+};
+
+/* Source: FIPS 197, Figure 7. S-box: substitution values for the byte xy */
+static const uint32_t _s_box[256] = {
+	0x63, 0x7c, 0x77, 0x7b,   0xf2, 0x6b, 0x6f, 0xc5,
+	0x30, 0x01, 0x67, 0x2b,   0xfe, 0xd7, 0xab, 0x76,
+
+	0xca, 0x82, 0xc9, 0x7d,   0xfa, 0x59, 0x47, 0xf0,
+	0xad, 0xd4, 0xa2, 0xaf,   0x9c, 0xa4, 0x72, 0xc0,
+
+	0xb7, 0xfd, 0x93, 0x26,   0x36, 0x3f, 0xf7, 0xcc,
+	0x34, 0xa5, 0xe5, 0xf1,   0x71, 0xd8, 0x31, 0x15,
+
+	0x04, 0xc7, 0x23, 0xc3,   0x18, 0x96, 0x05, 0x9a,
+	0x07, 0x12, 0x80, 0xe2,   0xeb, 0x27, 0xb2, 0x75,
+
+	0x09, 0x83, 0x2c, 0x1a,   0x1b, 0x6e, 0x5a, 0xa0,
+	0x52, 0x3b, 0xd6, 0xb3,   0x29, 0xe3, 0x2f, 0x84,
+
+	0x53, 0xd1, 0x00, 0xed,   0x20, 0xfc, 0xb1, 0x5b,
+	0x6a, 0xcb, 0xbe, 0x39,   0x4a, 0x4c, 0x58, 0xcf,
+
+	0xd0, 0xef, 0xaa, 0xfb,   0x43, 0x4d, 0x33, 0x85,
+	0x45, 0xf9, 0x02, 0x7f,   0x50, 0x3c, 0x9f, 0xa8,
+
+	0x51, 0xa3, 0x40, 0x8f,   0x92, 0x9d, 0x38, 0xf5,
+	0xbc, 0xb6, 0xda, 0x21,   0x10, 0xff, 0xf3, 0xd2,
+
+	0xcd, 0x0c, 0x13, 0xec,   0x5f, 0x97, 0x44, 0x17,
+	0xc4, 0xa7, 0x7e, 0x3d,   0x64, 0x5d, 0x19, 0x73,
+
+	0x60, 0x81, 0x4f, 0xdc,   0x22, 0x2a, 0x90, 0x88,
+	0x46, 0xee, 0xb8, 0x14,   0xde, 0x5e, 0x0b, 0xdb,
+
+	0xe0, 0x32, 0x3a, 0x0a,   0x49, 0x06, 0x24, 0x5c,
+	0xc2, 0xd3, 0xac, 0x62,   0x91, 0x95, 0xe4, 0x79,
+
+	0xe7, 0xc8, 0x37, 0x6d,   0x8d, 0xd5, 0x4e, 0xa9,
+	0x6c, 0x56, 0xf4, 0xea,   0x65, 0x7a, 0xae, 0x08,
+
+	0xba, 0x78, 0x25, 0x2e,   0x1c, 0xa6, 0xb4, 0xc6,
+	0xe8, 0xdd, 0x74, 0x1f,   0x4b, 0xbd, 0x8b, 0x8a,
+
+	0x70, 0x3e, 0xb5, 0x66,   0x48, 0x03, 0xf6, 0x0e,
+	0x61, 0x35, 0x57, 0xb9,   0x86, 0xc1, 0x1d, 0x9e,
+
+	0xe1, 0xf8, 0x98, 0x11,   0x69, 0xd9, 0x8e, 0x94,
+	0x9b, 0x1e, 0x87, 0xe9,   0xce, 0x55, 0x28, 0xdf,
+
+	0x8c, 0xa1, 0x89, 0x0d,   0xbf, 0xe6, 0x42, 0x68,
+	0x41, 0x99, 0x2d, 0x0f,   0xb0, 0x54, 0xbb, 0x16 };
+
+
+/*
+ *	Source:	FIPS 197, Sec 5.2 Key Expansion, Figure 11. Pseudo Code for Key
+ *		Expansion.
+ */
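+/*
+ * Expands AES_KEY into 4 * (Nr + 1) round-key words in AES_RND_KEY:
+ * 44 words for AES-128, 52 for AES-192, 60 for AES-256.
+ */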
+static void _aes_expand_key_schedule(uint32_t keysize, uint32_t *AES_KEY,
+		uint32_t *AES_RND_KEY)
+{
+	uint32_t i;
+	uint32_t Nk;
+	uint32_t Nr, rot_data;
+	uint32_t Rcon = 0x01000000;
+	uint32_t temp;
+	uint32_t data_in;
+	uint32_t MSB_store;
+	uint32_t byte_for_sub;
+	uint32_t word_sub[4];
+
+	switch (keysize) {
+	case 192:
+		Nk = 6;
+		Nr = 12;
+		break;
+
+	case 256:
+		Nk = 8;
+		Nr = 14;
+		break;
+
+	case 128:
+	default:  /* default to AES128 */
+		Nk = 4;
+		Nr = 10;
+		break;
+	}
+
+	/* key expansion */
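+	/* Rcon carries the FIPS 197 round constant {x^(j-1), 0, 0, 0} in
+	 * GF(2^8); the switch statements below hard-code its successive
+	 * values for each key size.
+	 */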
+	i = 0;
+	while (i < Nk) {
+		AES_RND_KEY[i] = AES_KEY[i];
+		i = i + 1;
+	}
+
+	i = Nk;
+	while (i < (4 * (Nr + 1))) {
+		temp = AES_RND_KEY[i-1];
+		if (Nr == 14) {
+			switch (i) {
+			case 8:
+				Rcon = 0x01000000;
+				break;
+
+			case 16:
+				Rcon = 0x02000000;
+				break;
+
+			case 24:
+				Rcon = 0x04000000;
+				break;
+
+			case 32:
+				Rcon = 0x08000000;
+				break;
+
+			case 40:
+				Rcon = 0x10000000;
+				break;
+
+			case 48:
+				Rcon = 0x20000000;
+				break;
+
+			case 56:
+				Rcon = 0x40000000;
+				break;
+			}
+		} else if (Nr == 12) {
+			switch (i) {
+			case  6:
+				Rcon = 0x01000000;
+				break;
+
+			case 12:
+				Rcon = 0x02000000;
+				break;
+
+			case 18:
+				Rcon = 0x04000000;
+				break;
+
+			case 24:
+				Rcon = 0x08000000;
+				break;
+
+			case 30:
+				Rcon = 0x10000000;
+				break;
+
+			case 36:
+				Rcon = 0x20000000;
+				break;
+
+			case 42:
+				Rcon = 0x40000000;
+				break;
+
+			case 48:
+				Rcon = 0x80000000;
+				break;
+			}
+		} else if (Nr == 10) {
+			switch (i) {
+			case 4:
+				Rcon = 0x01000000;
+				break;
+
+			case 8:
+				Rcon = 0x02000000;
+				break;
+
+			case 12:
+				Rcon = 0x04000000;
+				break;
+
+			case 16:
+				Rcon = 0x08000000;
+				break;
+
+			case 20:
+				Rcon = 0x10000000;
+				break;
+
+			case 24:
+				Rcon = 0x20000000;
+				break;
+
+			case 28:
+				Rcon = 0x40000000;
+				break;
+
+			case 32:
+				Rcon = 0x80000000;
+				break;
+
+			case 36:
+				Rcon = 0x1b000000;
+				break;
+
+			case 40:
+				Rcon = 0x36000000;
+				break;
+			}
+		}
+
+		if ((i % Nk) == 0) {
+			data_in   = temp;
+			MSB_store = (data_in >> 24 & 0xff);
+			rot_data  = (data_in << 8) | MSB_store;
+			byte_for_sub = rot_data;
+			word_sub[0] = _s_box[(byte_for_sub & 0xff)];
+			word_sub[1] = (_s_box[((byte_for_sub & 0xff00) >> 8)]
+								<< 8);
+			word_sub[2] = (_s_box[((byte_for_sub & 0xff0000) >> 16)]
+								<< 16);
+			word_sub[3] = (_s_box[((byte_for_sub & 0xff000000)
+								>> 24)] << 24);
+			word_sub[0] =  word_sub[0] | word_sub[1] | word_sub[2] |
+							word_sub[3];
+			temp = word_sub[0] ^ Rcon;
+		} else if ((Nk > 6) && ((i % Nk) == 4)) {
+			byte_for_sub = temp;
+			word_sub[0] = _s_box[(byte_for_sub & 0xff)];
+			word_sub[1] = (_s_box[((byte_for_sub & 0xff00) >> 8)]
+								<< 8);
+			word_sub[2] = (_s_box[((byte_for_sub & 0xff0000) >> 16)]
+								<< 16);
+			word_sub[3] = (_s_box[((byte_for_sub & 0xff000000) >>
+								 24)] << 24);
+			word_sub[0] =  word_sub[0] | word_sub[1] | word_sub[2] |
+						word_sub[3];
+			temp = word_sub[0];
+		}
+
+		AES_RND_KEY[i] = AES_RND_KEY[i-Nk]^temp;
+		i = i+1;
+	}
+}
+
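+/*
+ * Pack a byte stream big-endian (network order) into 32-bit words; e.g.
+ * bytes 0x01 0x02 0x03 0x04 become the word 0x01020304. A partial
+ * trailing word is left-justified and zero-padded.
+ */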
+static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned n;
+
+	n = len / sizeof(uint32_t);
+	for (; n > 0; n--) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000) |
+				(((*(b+2)) << 8) & 0xff00) |
+				(*(b+3) & 0xff);
+		b += sizeof(uint32_t);
+		iv++;
+	}
+
+	n = len % sizeof(uint32_t);
+	if (n == 3) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000) |
+				(((*(b+2)) << 8) & 0xff00);
+	} else if (n == 2) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000);
+	} else if (n == 1) {
+		*iv = ((*b << 24) & 0xff000000);
+	}
+}
+
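+/*
+ * Inverse of the above: unpack 32-bit words back into a big-endian byte
+ * stream. Used, for example, to return the engine's counter/IV registers
+ * to callers in byte order.
+ */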
+static void _net_words_to_byte_stream(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned n = len  / sizeof(uint32_t);
+
+	for (; n > 0; n--) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 8)    & 0xff);
+		*b++ = (unsigned char) (*iv           & 0xff);
+		iv++;
+	}
+	n = len % sizeof(uint32_t);
+	if (n == 3) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 8)    & 0xff);
+	} else if (n == 2) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 16)   & 0xff);
+	} else if (n == 1) {
+		*b =   (unsigned char) ((*iv >> 24)   & 0xff);
+	}
+}
+
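+/* Count the scatterlist entries needed to cover nbytes of data. */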
+static int count_sg(struct scatterlist *sg, int nbytes)
+{
+	int i;
+
+	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
+		nbytes -= sg->length;
+	return i;
+}
+
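+/*
+ * pmem buffers are physically contiguous, so each entry's physical
+ * offset is used directly as its DMA address; no mapping is performed.
+ */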
+static int dma_map_pmem_sg(struct buf_info *pmem, unsigned entries,
+						struct scatterlist *sg)
+{
+	int i;
+
+	for (i = 0; i < entries; i++) {
+		sg->dma_address = (dma_addr_t)pmem->offset;
+		sg++;
+		pmem++;
+	}
+	return 0;
+}
+
+static int _probe_ce_engine(struct qce_device *pce_dev)
+{
+	unsigned int val;
+	unsigned int rev;
+	unsigned int eng_availability;	/* available engine functions */
+
+	val = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+	if ((val & 0xfffffff) != 0x0200004) {
+		dev_err(pce_dev->pdev,
+				"unknown Qualcomm crypto device at 0x%x 0x%x\n",
+				pce_dev->phy_iobase, val);
+		return -EIO;
+	};
+	rev = (val & CRYPTO_CORE_REV_MASK) >> CRYPTO_CORE_REV;
+	if (rev == 0x2) {
+		dev_info(pce_dev->pdev,
+				"Qualcomm Crypto 3e device found at 0x%x\n",
+				pce_dev->phy_iobase);
+	} else if (rev == 0x1) {
+		dev_info(pce_dev->pdev,
+				"Qualcomm Crypto 3 device found at 0x%x\n",
+				pce_dev->phy_iobase);
+	} else if (rev == 0x0) {
+		dev_info(pce_dev->pdev,
+				"Qualcomm Crypto 2 device found at 0x%x\n",
+				pce_dev->phy_iobase);
+	} else {
+		dev_err(pce_dev->pdev,
+				"unknown Qualcomm crypto device at 0x%x\n",
+				pce_dev->phy_iobase);
+		return -EIO;
+	}
+
+	eng_availability = readl_relaxed(pce_dev->iobase +
+						CRYPTO_ENGINES_AVAIL);
+
+	if (((eng_availability & CRYPTO_AES_SEL_MASK) >> CRYPTO_AES_SEL)
+			== CRYPTO_AES_SEL_FAST)
+		pce_dev->fastaes = 1;
+	else
+		pce_dev->fastaes = 0;
+
+	if (eng_availability & (1 << CRYPTO_HMAC_SEL))
+		pce_dev->hmac = 1;
+	else
+		pce_dev->hmac = 0;
+
+	if ((eng_availability & (1 << CRYPTO_F9_SEL)) &&
+			(eng_availability & (1 << CRYPTO_F8_SEL)))
+		pce_dev->ota = true;
+	else
+		pce_dev->ota = false;
+
+	pce_dev->aes_key_size = 0;	/* no cached AES key yet */
+
+	return 0;
+};
+
+static int _init_ce_engine(struct qce_device *pce_dev)
+{
+	unsigned int val;
+
+	/* reset qce */
+	writel_relaxed(1 << CRYPTO_SW_RST, pce_dev->iobase + CRYPTO_CONFIG_REG);
+
+	/* Ensure previous instruction (write to reset bit)
+	 * was completed.
+	 */
+	mb();
+	/* configure ce */
+	val = (1 << CRYPTO_MASK_DOUT_INTR) | (1 << CRYPTO_MASK_DIN_INTR) |
+			(1 << CRYPTO_MASK_AUTH_DONE_INTR) |
+					(1 << CRYPTO_MASK_ERR_INTR);
+	writel_relaxed(val, pce_dev->iobase + CRYPTO_CONFIG_REG);
+
+	if (_probe_ce_engine(pce_dev) < 0)
+		return -EIO;
+	if (readl_relaxed(pce_dev->iobase + CRYPTO_CONFIG_REG) != val) {
+		dev_err(pce_dev->pdev,
+				"unknown Qualcomm crypto device at 0x%x\n",
+				pce_dev->phy_iobase);
+		return -EIO;
+	};
+	return 0;
+};
+
+static int _sha_ce_setup(struct qce_device *pce_dev, struct qce_sha_req *sreq)
+{
+	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+	uint32_t diglen;
+	int rc;
+	int i;
+	uint32_t cfg = 0;
+
+	/* if not the last, the size has to be on the block boundary */
+	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
+		return -EIO;
+
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+		diglen = SHA1_DIGEST_SIZE;
+		break;
+	case QCE_HASH_SHA256:
+		diglen = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		return -EINVAL;
+	}
+	/*
+	 * write 20/32 bytes, 5/8 words into auth_iv
+	 *  for SHA1/SHA256
+	 */
+
+	if (sreq->first_blk) {
+		if (sreq->alg == QCE_HASH_SHA1) {
+			for (i = 0; i < 5; i++)
+				auth32[i] = _std_init_vector_sha1[i];
+		} else {
+			for (i = 0; i < 8; i++)
+				auth32[i] = _std_init_vector_sha256[i];
+		}
+	} else
+		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
+
+	rc = clk_enable(pce_dev->ce_clk);
+	if (rc)
+		return rc;
+
+	writel_relaxed(auth32[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
+	writel_relaxed(auth32[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
+	writel_relaxed(auth32[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
+	writel_relaxed(auth32[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
+	writel_relaxed(auth32[4], pce_dev->iobase + CRYPTO_AUTH_IV4_REG);
+
+	if (sreq->alg == QCE_HASH_SHA256) {
+		writel_relaxed(auth32[5], pce_dev->iobase +
+							CRYPTO_AUTH_IV5_REG);
+		writel_relaxed(auth32[6], pce_dev->iobase +
+							CRYPTO_AUTH_IV6_REG);
+		writel_relaxed(auth32[7], pce_dev->iobase +
+							CRYPTO_AUTH_IV7_REG);
+	}
+	/* write auth_bytecnt 0/1, start with 0 */
+	writel_relaxed(sreq->auth_data[0], pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT0_REG);
+	writel_relaxed(sreq->auth_data[1], pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT1_REG);
+
+	/* write auth_seg_cfg */
+	writel_relaxed(sreq->size << CRYPTO_AUTH_SEG_SIZE,
+			pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+	/*
+	 * write seg_cfg
+	 */
+
+	if (sreq->alg == QCE_HASH_SHA1)
+		cfg |= (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE);
+	else
+		cfg |= (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE);
+
+	if (sreq->first_blk)
+		cfg |= 1 << CRYPTO_FIRST;
+	if (sreq->last_blk)
+		cfg |= 1 << CRYPTO_LAST;
+	cfg |= CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG;
+	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
+
+	/* write seg_size   */
+	writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	/* issue go to crypto   */
+	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
+	/* Ensure the previous instruction (setting the GO register)
+	 * has completed before issuing a DMA transfer request.
+	 */
+	mb();
+
+	return 0;
+}
+
+static int _ce_setup(struct qce_device *pce_dev, struct qce_req *q_req,
+		uint32_t totallen, uint32_t coffset)
+{
+	uint32_t hmackey[HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0};
+	uint32_t enckey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0};
+	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+			0, 0, 0, 0};
+	uint32_t enck_size_in_word = q_req->encklen / sizeof(uint32_t);
+	int aes_key_chg;
+	int i, rc;
+	uint32_t aes_round_key[CRYPTO_AES_RNDKEYS];
+	uint32_t cfg;
+	uint32_t ivsize = q_req->ivsize;
+
+	rc = clk_enable(pce_dev->ce_clk);
+	if (rc)
+		return rc;
+
+	cfg = (1 << CRYPTO_FIRST) | (1 << CRYPTO_LAST);
+	if (q_req->op == QCE_REQ_AEAD) {
+
+		/* do authentication setup */
+
+		cfg |= (CRYPTO_AUTH_SIZE_HMAC_SHA1 << CRYPTO_AUTH_SIZE)|
+				(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG);
+
+		/* write sha1 init vector */
+		writel_relaxed(_std_init_vector_sha1[0],
+				pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
+		writel_relaxed(_std_init_vector_sha1[1],
+				pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
+		writel_relaxed(_std_init_vector_sha1[2],
+				pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
+		writel_relaxed(_std_init_vector_sha1[3],
+				pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
+		writel_relaxed(_std_init_vector_sha1[4],
+				pce_dev->iobase + CRYPTO_AUTH_IV4_REG);
+		/* write hmac key */
+		_byte_stream_to_net_words(hmackey, q_req->authkey,
+						q_req->authklen);
+		writel_relaxed(hmackey[0], pce_dev->iobase +
+							CRYPTO_AUTH_IV5_REG);
+		writel_relaxed(hmackey[1], pce_dev->iobase +
+							CRYPTO_AUTH_IV6_REG);
+		writel_relaxed(hmackey[2], pce_dev->iobase +
+							CRYPTO_AUTH_IV7_REG);
+		writel_relaxed(hmackey[3], pce_dev->iobase +
+							CRYPTO_AUTH_IV8_REG);
+		writel_relaxed(hmackey[4], pce_dev->iobase +
+							CRYPTO_AUTH_IV9_REG);
+		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
+		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);
+
+		/* write auth_seg_cfg */
+		writel_relaxed((totallen << CRYPTO_AUTH_SEG_SIZE) & 0xffff0000,
+				pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+	}
+
+	_byte_stream_to_net_words(enckey32, q_req->enckey, q_req->encklen);
+
+	switch (q_req->mode) {
+	case QCE_MODE_ECB:
+		cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+		break;
+
+	case QCE_MODE_CBC:
+		cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+		break;
+
+	case QCE_MODE_CTR:
+	default:
+		cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+		break;
+	}
+	pce_dev->mode = q_req->mode;
+
+	switch (q_req->alg) {
+	case CIPHER_ALG_DES:
+		if (q_req->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+			writel_relaxed(enciv32[0], pce_dev->iobase +
+						CRYPTO_CNTR0_IV0_REG);
+			writel_relaxed(enciv32[1], pce_dev->iobase +
+						CRYPTO_CNTR1_IV1_REG);
+		}
+		writel_relaxed(enckey32[0], pce_dev->iobase +
+							CRYPTO_DES_KEY0_REG);
+		writel_relaxed(enckey32[1], pce_dev->iobase +
+							CRYPTO_DES_KEY1_REG);
+		cfg |= ((CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ)  |
+				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
+		break;
+
+	case CIPHER_ALG_3DES:
+		if (q_req->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+			writel_relaxed(enciv32[0], pce_dev->iobase +
+						CRYPTO_CNTR0_IV0_REG);
+			writel_relaxed(enciv32[1], pce_dev->iobase +
+						CRYPTO_CNTR1_IV1_REG);
+		}
+		writel_relaxed(enckey32[0], pce_dev->iobase +
+							CRYPTO_DES_KEY0_REG);
+		writel_relaxed(enckey32[1], pce_dev->iobase +
+							CRYPTO_DES_KEY1_REG);
+		writel_relaxed(enckey32[2], pce_dev->iobase +
+							CRYPTO_DES_KEY2_REG);
+		writel_relaxed(enckey32[3], pce_dev->iobase +
+							CRYPTO_DES_KEY3_REG);
+		writel_relaxed(enckey32[4], pce_dev->iobase +
+							CRYPTO_DES_KEY4_REG);
+		writel_relaxed(enckey32[5], pce_dev->iobase +
+							CRYPTO_DES_KEY5_REG);
+		cfg |= ((CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ)  |
+				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
+		break;
+
+	case CIPHER_ALG_AES:
+	default:
+		if (q_req->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+			writel_relaxed(enciv32[0], pce_dev->iobase +
+						CRYPTO_CNTR0_IV0_REG);
+			writel_relaxed(enciv32[1], pce_dev->iobase +
+						CRYPTO_CNTR1_IV1_REG);
+			writel_relaxed(enciv32[2], pce_dev->iobase +
+						CRYPTO_CNTR2_IV2_REG);
+			writel_relaxed(enciv32[3], pce_dev->iobase +
+						CRYPTO_CNTR3_IV3_REG);
+		}
+		/* set number of counter bits */
+		writel_relaxed(0xffff, pce_dev->iobase + CRYPTO_CNTR_MASK_REG);
+
+		if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+			cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+					CRYPTO_ENCR_KEY_SZ);
+			cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
+		} else {
+			switch (q_req->encklen) {
+			case AES128_KEY_SIZE:
+				cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+							CRYPTO_ENCR_KEY_SZ);
+				break;
+			case AES192_KEY_SIZE:
+				cfg |= (CRYPTO_ENCR_KEY_SZ_AES192 <<
+							CRYPTO_ENCR_KEY_SZ);
+				break;
+			case AES256_KEY_SIZE:
+			default:
+				cfg |= (CRYPTO_ENCR_KEY_SZ_AES256 <<
+							CRYPTO_ENCR_KEY_SZ);
+
+				/* check for null key. If null, use hw key*/
+				for (i = 0; i < enck_size_in_word; i++) {
+					if (enckey32[i] != 0)
+						break;
+				}
+				if (i == enck_size_in_word)
+					cfg |= 1 << CRYPTO_USE_HW_KEY;
+				break;
+			} /* end of switch (q_req->encklen) */
+
+			cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
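+			/* Rewrite the key registers only when the cached key
+			 * differs from the one in this request.
+			 */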
+			if (pce_dev->aes_key_size !=  q_req->encklen)
+				aes_key_chg = 1;
+			else {
+				for (i = 0; i < enck_size_in_word; i++) {
+					if (enckey32[i] != pce_dev->aeskey[i])
+						break;
+				}
+				aes_key_chg = (i == enck_size_in_word) ? 0 : 1;
+			}
+
+			if (aes_key_chg) {
+				if (pce_dev->fastaes) {
+					for (i = 0; i < enck_size_in_word;
+									i++) {
+						writel_relaxed(enckey32[i],
+							pce_dev->iobase +
+							CRYPTO_AES_RNDKEY0 +
+							(i * sizeof(uint32_t)));
+					}
+				} else {
+					/* size in bit */
+					_aes_expand_key_schedule(
+						q_req->encklen * 8,
+						enckey32, aes_round_key);
+
+					for (i = 0; i < CRYPTO_AES_RNDKEYS;
+									i++) {
+						writel_relaxed(aes_round_key[i],
+							pce_dev->iobase +
+							CRYPTO_AES_RNDKEY0 +
+							(i * sizeof(uint32_t)));
+					}
+				}
+
+				pce_dev->aes_key_size = q_req->encklen;
+				for (i = 0; i < enck_size_in_word; i++)
+					pce_dev->aeskey[i] = enckey32[i];
+			} /* end of if (aes_key_chg) */
+		} /* else of if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+		break;
+	} /* end of switch (q_req->alg) */
+
+	if (q_req->dir == QCE_ENCRYPT)
+		cfg |= (1 << CRYPTO_AUTH_POS);
+	cfg |= ((q_req->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+
+	/* write encr seg cfg */
+	writel_relaxed((q_req->cryptlen << CRYPTO_ENCR_SEG_SIZE) |
+			(coffset & 0xffff),      /* cipher offset */
+			pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write seg cfg and size */
+	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
+	writel_relaxed(totallen, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	/* issue go to crypto   */
+	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
+	/* Ensure the previous instruction (setting the GO register)
+	 * has completed before issuing a DMA transfer request.
+	 */
+	mb();
+	return 0;
+};
+
+static int _aead_complete(struct qce_device *pce_dev)
+{
+	struct aead_request *areq;
+	struct crypto_aead *aead;
+	uint32_t ivsize;
+	uint32_t iv_out[4];
+	unsigned char iv[4 * sizeof(uint32_t)];
+	uint32_t status;
+
+	areq = (struct aead_request *) pce_dev->areq;
+	aead = crypto_aead_reqtfm(areq);
+	ivsize = crypto_aead_ivsize(aead);
+
+	if (areq->src != areq->dst) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+					DMA_FROM_DEVICE);
+	}
+	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
+			ivsize, DMA_TO_DEVICE);
+	dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+			DMA_TO_DEVICE);
+
+	/* check ce error status */
+	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+	if (status & (1 << CRYPTO_SW_ERR)) {
+		pce_dev->err++;
+		dev_err(pce_dev->pdev,
+			"Qualcomm Crypto Error at 0x%x, status%x\n",
+			pce_dev->phy_iobase, status);
+		_init_ce_engine(pce_dev);
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO);
+		return 0;
+	};
+
+	/* return the IV: none for ECB; otherwise read back the counter/IV */
+	if (pce_dev->mode == QCE_MODE_ECB) {
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
+				pce_dev->chan_ce_in_status |
+				pce_dev->chan_ce_out_status);
+	} else {
+
+		iv_out[0] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR0_IV0_REG);
+		iv_out[1] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR1_IV1_REG);
+		iv_out[2] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR2_IV2_REG);
+		iv_out[3] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR3_IV3_REG);
+
+		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, pce_dev->dig_result, iv,
+				pce_dev->chan_ce_in_status |
+				pce_dev->chan_ce_out_status);
+	};
+	return 0;
+};
+
+static void _sha_complete(struct qce_device *pce_dev)
+{
+
+	struct ahash_request *areq;
+	uint32_t auth_data[2];
+	uint32_t status;
+
+	areq = (struct ahash_request *) pce_dev->areq;
+	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+				DMA_TO_DEVICE);
+
+	/* check ce error status */
+	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+	if (status & (1 << CRYPTO_SW_ERR)) {
+		pce_dev->err++;
+		dev_err(pce_dev->pdev,
+			"Qualcomm Crypto Error at 0x%x, status%x\n",
+			pce_dev->phy_iobase, status);
+		_init_ce_engine(pce_dev);
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO);
+		return;
+	};
+
+	auth_data[0] = readl_relaxed(pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT0_REG);
+	auth_data[1] = readl_relaxed(pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT1_REG);
+	/* Ensure the previous instructions (retrieving the byte count
+	 * information) have completed before disabling the clk.
+	 */
+	mb();
+	clk_disable(pce_dev->ce_clk);
+	pce_dev->qce_cb(areq,  pce_dev->dig_result, (unsigned char *)auth_data,
+				pce_dev->chan_ce_in_status);
+};
+
+static int _ablk_cipher_complete(struct qce_device *pce_dev)
+{
+	struct ablkcipher_request *areq;
+	uint32_t iv_out[4];
+	unsigned char iv[4 * sizeof(uint32_t)];
+	uint32_t status;
+
+	areq = (struct ablkcipher_request *) pce_dev->areq;
+
+	if (areq->src != areq->dst) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst,
+			pce_dev->dst_nents, DMA_FROM_DEVICE);
+	}
+	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+						DMA_TO_DEVICE);
+
+	/* check ce error status */
+	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+	if (status & (1 << CRYPTO_SW_ERR)) {
+		pce_dev->err++;
+		dev_err(pce_dev->pdev,
+			"Qualcomm Crypto Error at 0x%x, status%x\n",
+			pce_dev->phy_iobase, status);
+		_init_ce_engine(pce_dev);
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, NULL, NULL, -ENXIO);
+		return 0;
+	};
+
+	/* get iv out */
+	if (pce_dev->mode == QCE_MODE_ECB) {
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+	} else {
+		iv_out[0] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR0_IV0_REG);
+		iv_out[1] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR1_IV1_REG);
+		iv_out[2] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR2_IV2_REG);
+		iv_out[3] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR3_IV3_REG);
+
+		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+	}
+
+	return 0;
+};
+
+static int _ablk_cipher_use_pmem_complete(struct qce_device *pce_dev)
+{
+	struct ablkcipher_request *areq;
+	uint32_t iv_out[4];
+	unsigned char iv[4 * sizeof(uint32_t)];
+	uint32_t status;
+
+	areq = (struct ablkcipher_request *) pce_dev->areq;
+
+	/* check ce error status */
+	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+	if (status & (1 << CRYPTO_SW_ERR)) {
+		pce_dev->err++;
+		dev_err(pce_dev->pdev,
+			"Qualcomm Crypto Error at 0x%x, status%x\n",
+			pce_dev->phy_iobase, status);
+		_init_ce_engine(pce_dev);
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, NULL, NULL, -ENXIO);
+		return 0;
+	};
+
+	/* get iv out */
+	if (pce_dev->mode == QCE_MODE_ECB) {
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+	} else {
+		iv_out[0] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR0_IV0_REG);
+		iv_out[1] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR1_IV1_REG);
+		iv_out[2] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR2_IV2_REG);
+		iv_out[3] = readl_relaxed(pce_dev->iobase +
+							CRYPTO_CNTR3_IV3_REG);
+
+		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
+		clk_disable(pce_dev->ce_clk);
+		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+	}
+
+	return 0;
+};
+
+
+static int _chain_sg_buffer_in(struct qce_device *pce_dev,
+		struct scatterlist *sg, unsigned int nbytes)
+{
+	unsigned int len;
+	unsigned int dlen;
+	struct dmov_desc *pdesc;
+
+	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
+	/*
+	 * Two consecutive, physically contiguous chunks are merged into
+	 * the same buffer descriptor.
+	 */
+	while (nbytes > 0) {
+		len = min(nbytes, sg_dma_len(sg));
+		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+		nbytes -= len;
+		if (dlen == 0) {
+			pdesc->addr  = sg_dma_address(sg);
+			pdesc->len = len;
+		} else if (sg_dma_address(sg) == (pdesc->addr + dlen))
+			pdesc->len  = dlen + len;
+		else {
+			pce_dev->ce_in_src_desc_index++;
+			if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
+				return -ENOMEM;
+			pdesc++;
+			pdesc->len = len;
+			pdesc->addr = sg_dma_address(sg);
+		}
+		if (nbytes > 0)
+			sg = sg_next(sg);
+	}
+	return 0;
+}
+
+static int _chain_pm_buffer_in(struct qce_device *pce_dev,
+		unsigned int pmem, unsigned int nbytes)
+{
+	unsigned int dlen;
+	struct dmov_desc *pdesc;
+
+	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
+	dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+	if (dlen == 0) {
+		pdesc->addr  = pmem;
+		pdesc->len = nbytes;
+	} else if (pmem == (pdesc->addr + dlen)) {
+		pdesc->len  = dlen + nbytes;
+	} else {
+		pce_dev->ce_in_src_desc_index++;
+		if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
+			return -ENOMEM;
+		pdesc++;
+		pdesc->len = nbytes;
+		pdesc->addr = pmem;
+	}
+	return 0;
+}
+
+static void _chain_buffer_in_init(struct qce_device *pce_dev)
+{
+	struct dmov_desc *pdesc;
+
+	pce_dev->ce_in_src_desc_index = 0;
+	pdesc = pce_dev->ce_in_src_desc;
+	pdesc->len = 0;
+}
+
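+/*
+ * Close out the ce_in command list: mark the last descriptors and set
+ * CMD_LC on the final command. With ncmd == 2 the trailing single
+ * command (the digest read) becomes the last command instead of the
+ * scatter gather command.
+ */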
+static void _ce_in_final(struct qce_device *pce_dev, int ncmd, unsigned total)
+{
+	struct dmov_desc *pdesc;
+	dmov_sg *pcmd;
+
+	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
+	pdesc->len |= ADM_DESC_LAST;
+	pdesc = pce_dev->ce_in_dst_desc;
+	pdesc->len = ADM_DESC_LAST | total;
+
+	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
+	if (ncmd == 1)
+		pcmd->cmd |= CMD_LC;
+	else {
+		dmov_s  *pscmd;
+
+		pcmd->cmd &= ~CMD_LC;
+		pcmd++;
+		pscmd = (dmov_s *)pcmd;
+		pscmd->cmd |= CMD_LC;
+	}
+
+#ifdef QCE_DEBUG
+	dev_info(pce_dev->pdev, "_ce_in_final %d\n",
+					pce_dev->ce_in_src_desc_index);
+#endif
+}
+
+#ifdef QCE_DEBUG
+static void _ce_in_dump(struct qce_device *pce_dev)
+{
+	int i;
+	struct dmov_desc *pdesc;
+
+	dev_info(pce_dev->pdev, "_ce_in_dump\n");
+	for (i = 0; i <= pce_dev->ce_in_src_desc_index; i++) {
+		pdesc = pce_dev->ce_in_src_desc + i;
+		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
+				pdesc->len);
+	}
+	pdesc = pce_dev->ce_in_dst_desc;
+	dev_info(pce_dev->pdev, "dst - %x , %x\n", pdesc->addr,
+				pdesc->len);
+};
+
+static void _ce_out_dump(struct qce_device *pce_dev)
+{
+	int i;
+	struct dmov_desc *pdesc;
+
+	dev_info(pce_dev->pdev, "_ce_out_dump\n");
+	for (i = 0; i <= pce_dev->ce_out_dst_desc_index; i++) {
+		pdesc = pce_dev->ce_out_dst_desc + i;
+		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
+				pdesc->len);
+	}
+	pdesc = pce_dev->ce_out_src_desc;
+	dev_info(pce_dev->pdev, "src - %x , %x\n", pdesc->addr,
+				pdesc->len);
+};
+#endif
+
+static int _chain_sg_buffer_out(struct qce_device *pce_dev,
+		struct scatterlist *sg, unsigned int nbytes)
+{
+	unsigned int len;
+	unsigned int dlen;
+	struct dmov_desc *pdesc;
+
+	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
+	/*
+	 * Two consecutive, physically contiguous chunks are merged into
+	 * the same buffer descriptor.
+	 */
+	while (nbytes > 0) {
+		len = min(nbytes, sg_dma_len(sg));
+		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+		nbytes -= len;
+		if (dlen == 0) {
+			pdesc->addr  = sg_dma_address(sg);
+			pdesc->len = len;
+		} else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
+			pdesc->len  = dlen + len;
+		} else {
+			pce_dev->ce_out_dst_desc_index++;
+			if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
+				return -EIO;
+			pdesc++;
+			pdesc->len = len;
+			pdesc->addr = sg_dma_address(sg);
+		}
+		if (nbytes > 0)
+			sg = sg_next(sg);
+	}
+	return 0;
+}
+
+static int _chain_pm_buffer_out(struct qce_device *pce_dev,
+		unsigned int pmem, unsigned int nbytes)
+{
+	unsigned int dlen;
+	struct dmov_desc *pdesc;
+
+	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
+	dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+
+	if (dlen == 0) {
+		pdesc->addr  = pmem;
+		pdesc->len = nbytes;
+	} else if (pmem == (pdesc->addr + dlen)) {
+		pdesc->len  = dlen + nbytes;
+	} else {
+		pce_dev->ce_out_dst_desc_index++;
+		if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
+			return -EIO;
+		pdesc++;
+		pdesc->len = nbytes;
+		pdesc->addr = pmem;
+	}
+	return 0;
+};
+
+static void _chain_buffer_out_init(struct qce_device *pce_dev)
+{
+	struct dmov_desc *pdesc;
+
+	pce_dev->ce_out_dst_desc_index = 0;
+	pdesc = pce_dev->ce_out_dst_desc;
+	pdesc->len = 0;
+};
+
+static void _ce_out_final(struct qce_device *pce_dev, int ncmd, unsigned total)
+{
+	struct dmov_desc *pdesc;
+	dmov_sg *pcmd;
+
+	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
+	pdesc->len |= ADM_DESC_LAST;
+	pdesc = pce_dev->ce_out_src_desc;
+	pdesc->len = ADM_DESC_LAST | total;
+	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
+	if (ncmd == 1)
+		pcmd->cmd |= CMD_LC;
+	else {
+		dmov_s  *pscmd;
+
+		pcmd->cmd &= ~CMD_LC;
+		pcmd++;
+		pscmd = (dmov_s *)pcmd;
+		pscmd->cmd |= CMD_LC;
+	}
+#ifdef QCE_DEBUG
+	dev_info(pce_dev->pdev, "_ce_out_final %d\n",
+			pce_dev->ce_out_dst_desc_index);
+#endif
+
+};
+
+static void _aead_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+							result);
+		pce_dev->chan_ce_in_status = -1;
+	} else
+		pce_dev->chan_ce_in_status = 0;
+
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_aead_complete(pce_dev);
+	}
+};
+
+static void _aead_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+							result);
+		pce_dev->chan_ce_out_status = -1;
+	} else {
+		pce_dev->chan_ce_out_status = 0;
+	};
+
+	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_aead_complete(pce_dev);
+	}
+
+};
+
+static void _sha_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_in_status = -1;
+	} else
+		pce_dev->chan_ce_in_status = 0;
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+	_sha_complete(pce_dev);
+};
+
+static void _ablk_cipher_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_in_status = -1;
+	} else
+		pce_dev->chan_ce_in_status = 0;
+
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_complete(pce_dev);
+	}
+};
+
+static void _ablk_cipher_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_out_status = -1;
+	} else {
+		pce_dev->chan_ce_out_status = 0;
+	};
+
+	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_complete(pce_dev);
+	}
+};
+
+static void _ablk_cipher_ce_in_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_in_status = -1;
+	} else
+		pce_dev->chan_ce_in_status = 0;
+
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_use_pmem_complete(pce_dev);
+	}
+};
+
+static void _ablk_cipher_ce_out_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_out_status = -1;
+	} else {
+		pce_dev->chan_ce_out_status = 0;
+	};
+
+	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_use_pmem_complete(pce_dev);
+	}
+};
+
+static int _setup_cmd_template(struct qce_device *pce_dev)
+{
+	dmov_sg *pcmd;
+	dmov_s  *pscmd;
+	struct dmov_desc *pdesc;
+	unsigned char *vaddr;
+
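+	/*
+	 * Carve the page of coherent memory into, in order: ce_in src
+	 * descriptors, ce_in dst descriptor, ce_in command list, digest
+	 * result, ce_out command list, ce_out src descriptor, ce_out dst
+	 * descriptors, pad block, the two command pointer lists, and
+	 * finally the throw-away ce_out area.
+	 */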
+	/* Divide up the 4K coherent memory */
+	/* 1. ce_in channel 1st command src descriptors, 128 entries */
+	vaddr = pce_dev->coh_vmem;
+	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
+	pce_dev->ce_in_src_desc = (struct dmov_desc *) vaddr;
+	pce_dev->phy_ce_in_src_desc = pce_dev->coh_pmem +
+			 (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
+
+	/* 2. ce_in channel 1st command dst descriptor, 1 entry */
+	pce_dev->ce_in_dst_desc = (struct dmov_desc *) vaddr;
+	pce_dev->phy_ce_in_dst_desc = pce_dev->coh_pmem +
+			 (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(struct dmov_desc);
+
+	/*
+	 * 3. ce_in channel command list of one scatter gather command
+	 *    and one simple command.
+	 */
+	pce_dev->cmd_list_ce_in = vaddr;
+	pce_dev->phy_cmd_list_ce_in = pce_dev->coh_pmem
+			 + (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg);
+
+	/* 4. authentication result. */
+	pce_dev->dig_result = vaddr;
+	pce_dev->phy_dig_result = pce_dev->coh_pmem +
+			(vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + SHA256_DIGESTSIZE;
+
+	/*
+	 * 5. ce_out channel command list of one scatter gather command
+	 *    and one simple command.
+	 */
+	pce_dev->cmd_list_ce_out = vaddr;
+	pce_dev->phy_cmd_list_ce_out = pce_dev->coh_pmem
+			 + (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg);
+
+	/* 6. ce_out channel command src descriptors, 1 entry */
+	pce_dev->ce_out_src_desc = (struct dmov_desc *) vaddr;
+	pce_dev->phy_ce_out_src_desc = pce_dev->coh_pmem
+			 + (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(struct dmov_desc);
+
+	/* 7. ce_out channel command dst descriptors, 128 entries.  */
+	pce_dev->ce_out_dst_desc = (struct dmov_desc *) vaddr;
+	pce_dev->phy_ce_out_dst_desc = pce_dev->coh_pmem
+			 + (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
+
+	/* 8. pad area. */
+	pce_dev->ce_pad = vaddr;
+	pce_dev->phy_ce_pad = pce_dev->coh_pmem +
+			(vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + ADM_CE_BLOCK_SIZE;
+
+	/* 9. ce_in channel command pointer list.	 */
+	pce_dev->cmd_pointer_list_ce_in = (unsigned int *) vaddr;
+	pce_dev->phy_cmd_pointer_list_ce_in = pce_dev->coh_pmem +
+			(vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(unsigned char *);
+	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 8);
+
+	/* 10. ce_out channel command pointer list. */
+	pce_dev->cmd_pointer_list_ce_out = (unsigned int *) vaddr;
+	pce_dev->phy_cmd_pointer_list_ce_out =  pce_dev->coh_pmem +
+			(vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(unsigned char *);
+
+	/* 11. throw away area to store by-pass data from ce_out. */
+	pce_dev->ce_out_ignore = (unsigned char *) vaddr;
+	pce_dev->phy_ce_out_ignore  = pce_dev->coh_pmem
+			+ (vaddr - pce_dev->coh_vmem);
+	pce_dev->ce_out_ignore_size = PAGE_SIZE - (vaddr -
+			pce_dev->coh_vmem);  /* at least 1.5 K of space */
+	/*
+	 * The first command of command list ce_in is for the input of
+	 * concurrent operation of encrypt/decrypt or for the input
+	 * of authentication.
+	 */
+	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
+	/* swap byte and half word, dst crci, scatter gather */
+	pcmd->cmd = CMD_DST_SWAP_BYTES | CMD_DST_SWAP_SHORTS |
+			CMD_DST_CRCI(pce_dev->crci_in) | CMD_MODE_SG;
+	pdesc = pce_dev->ce_in_src_desc;
+	pdesc->addr = 0;	/* to be filled in each operation */
+	pdesc->len = 0;		/* to be filled in each operation */
+	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_in_src_desc;
+	pdesc = pce_dev->ce_in_dst_desc;
+	pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
+	pdesc->len = 0 | ADM_DESC_LAST;	/* to be filled in each operation */
+	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_in_dst_desc;
+	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
+						DST_INDEX_SG_CMD(0);
+	pcmd++;
+	/*
+	 * The second command is for the digested data of
+	 * hashing operation only. For others, this command is not used.
+	 */
+	pscmd = (dmov_s *) pcmd;
+	/* last command, swap byte, half word, src crci, single   */
+	pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
+			CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE;
+	pscmd->src = (unsigned) (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase);
+	pscmd->len = SHA256_DIGESTSIZE;	/* to be filled.  */
+	pscmd->dst = (unsigned) pce_dev->phy_dig_result;
+	/* setup command pointer list */
+	*(pce_dev->cmd_pointer_list_ce_in) = (CMD_PTR_LP | DMOV_CMD_LIST |
+			DMOV_CMD_ADDR((unsigned int)
+					pce_dev->phy_cmd_list_ce_in));
+	pce_dev->chan_ce_in_cmd->user = (void *) pce_dev;
+	pce_dev->chan_ce_in_cmd->exec_func = NULL;
+	pce_dev->chan_ce_in_cmd->cmdptr = DMOV_CMD_ADDR(
+			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_in);
+	pce_dev->chan_ce_in_cmd->crci_mask = msm_dmov_build_crci_mask(2,
+			pce_dev->crci_in, pce_dev->crci_hash);
+	/*
+	 * The first command in the command list ce_out.
+	 * It is for encry/decryp output.
+	 * If hashing only, ce_out is not used.
+	 */
+	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
+	/* swap byte, half word, source crci, scatter gather */
+	pcmd->cmd =   CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
+			CMD_SRC_CRCI(pce_dev->crci_out) | CMD_MODE_SG;
+	pdesc = pce_dev->ce_out_src_desc;
+	pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
+	pdesc->len = 0;  /* to be filled in each operation */
+	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_out_src_desc;
+	pdesc = pce_dev->ce_out_dst_desc;
+	pdesc->addr = 0;  /* to be filled in each operation */
+	pdesc->len = 0;  /* to be filled in each operation */
+	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_out_dst_desc;
+	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
+						DST_INDEX_SG_CMD(0);
+	pcmd++;
+	/*
+	 * The second command is for digested data of esp operation.
+	 * For ciphering, this command is not used.
+	 */
+	pscmd = (dmov_s *) pcmd;
+	/* last command, swap byte, half word, src crci, single   */
+	pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
+			CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE;
+	pscmd->src = (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase);
+	pscmd->len = SHA1_DIGESTSIZE;     /* we only support hmac(sha1) */
+	pscmd->dst = (unsigned) pce_dev->phy_dig_result;
+	/* setup command pointer list */
+	*(pce_dev->cmd_pointer_list_ce_out) = (CMD_PTR_LP | DMOV_CMD_LIST |
+			DMOV_CMD_ADDR((unsigned int)pce_dev->
+						phy_cmd_list_ce_out));
+
+	pce_dev->chan_ce_out_cmd->user = pce_dev;
+	pce_dev->chan_ce_out_cmd->exec_func = NULL;
+	pce_dev->chan_ce_out_cmd->cmdptr = DMOV_CMD_ADDR(
+			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_out);
+	pce_dev->chan_ce_out_cmd->crci_mask = msm_dmov_build_crci_mask(2,
+			pce_dev->crci_out, pce_dev->crci_hash);
+
+	return 0;
+};
+
+static int _qce_start_dma(struct qce_device *pce_dev, bool ce_in, bool ce_out)
+{
+
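+	/* Pre-mark any unused channel as already complete so that the
+	 * completion callback of the other channel can finish the request.
+	 */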
+	if (ce_in)
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IN_PROG;
+	else
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+
+	if (ce_out)
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IN_PROG;
+	else
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+
+	if (ce_in)
+		msm_dmov_enqueue_cmd(pce_dev->chan_ce_in,
+					pce_dev->chan_ce_in_cmd);
+	if (ce_out)
+		msm_dmov_enqueue_cmd(pce_dev->chan_ce_out,
+					pce_dev->chan_ce_out_cmd);
+
+	return 0;
+};
+
+static void _f9_complete(struct qce_device *pce_dev)
+{
+	uint32_t mac_i;
+	uint32_t status;
+
+	dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
+				pce_dev->ota_size, DMA_TO_DEVICE);
+
+	/* check ce error status */
+	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+	if (status & (1 << CRYPTO_SW_ERR)) {
+		pce_dev->err++;
+		dev_err(pce_dev->pdev,
+			"Qualcomm Crypto Error at 0x%x, status%x\n",
+			pce_dev->phy_iobase, status);
+		_init_ce_engine(pce_dev);
+		pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
+		return;
+	};
+
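+	/* The engine leaves the 32-bit MAC-I in AUTH_IV0; its value (not a
+	 * pointer) is passed through the icv argument of the callback.
+	 */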
+	mac_i = readl_relaxed(pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
+	pce_dev->qce_cb(pce_dev->areq, (void *) mac_i, NULL,
+				pce_dev->chan_ce_in_status);
+};
+
+static void _f8_complete(struct qce_device *pce_dev)
+{
+	uint32_t status;
+
+	if (pce_dev->phy_ota_dst != 0)
+		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst,
+				pce_dev->ota_size, DMA_FROM_DEVICE);
+	if (pce_dev->phy_ota_src != 0)
+		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
+				pce_dev->ota_size, (pce_dev->phy_ota_dst) ?
+				DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
+
+	/* check ce error status */
+	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+	if (status & (1 << CRYPTO_SW_ERR)) {
+		pce_dev->err++;
+		dev_err(pce_dev->pdev,
+			"Qualcomm Crypto Error at 0x%x, status%x\n",
+			pce_dev->phy_iobase, status);
+		_init_ce_engine(pce_dev);
+		pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
+		return;
+	};
+
+	pce_dev->qce_cb(pce_dev->areq, NULL, NULL,
+				pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+};
+
+static void _f9_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_in_status = -1;
+	} else
+		pce_dev->chan_ce_in_status = 0;
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+	_f9_complete(pce_dev);
+};
+
+static void _f8_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						 result);
+		pce_dev->chan_ce_in_status = -1;
+	} else
+		pce_dev->chan_ce_in_status = 0;
+
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_f8_complete(pce_dev);
+	}
+};
+
+static void _f8_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_out_status = -1;
+	} else {
+		pce_dev->chan_ce_out_status = 0;
+	};
+
+	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_f8_complete(pce_dev);
+	}
+};
+
+static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req)
+{
+	uint32_t cfg;
+	uint32_t ikey[OTA_KEY_SIZE/sizeof(uint32_t)];
+
+	_byte_stream_to_net_words(ikey, &req->ikey[0], OTA_KEY_SIZE);
+	writel_relaxed(ikey[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
+	writel_relaxed(ikey[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
+	writel_relaxed(ikey[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
+	writel_relaxed(ikey[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
+	writel_relaxed(req->last_bits, pce_dev->iobase + CRYPTO_AUTH_IV4_REG);
+
+	writel_relaxed(req->fresh, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
+	writel_relaxed(req->count_i, pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT1_REG);
+
+	/* write auth_seg_cfg */
+	writel_relaxed((uint32_t)req->msize << CRYPTO_AUTH_SEG_SIZE,
+			pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+	/* write seg_cfg */
+	cfg = (CRYPTO_AUTH_ALG_F9 << CRYPTO_AUTH_ALG) | (1 << CRYPTO_FIRST) |
+			(1 << CRYPTO_LAST);
+
+	if (req->algorithm == QCE_OTA_ALGO_KASUMI)
+		cfg |= (CRYPTO_AUTH_SIZE_UIA1 << CRYPTO_AUTH_SIZE);
+	else
+		cfg |= (CRYPTO_AUTH_SIZE_UIA2 << CRYPTO_AUTH_SIZE);
+
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		cfg |= 1 << CRYPTO_F9_DIRECTION;
+
+	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
+
+	/* write seg_size   */
+	writel_relaxed(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	/* issue go to crypto   */
+	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
+
+	/*
+	 * barrier to ensure previous instructions
+	 * (including GO) to CE finish before issue DMA transfer
+	 * request.
+	 */
+	mb();
+	return 0;
+};
+
+static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
+		bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
+		uint16_t cipher_size)
+{
+	uint32_t cfg;
+	uint32_t ckey[OTA_KEY_SIZE/sizeof(uint32_t)];
+
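+	/* Key stream generation requires the length to be a multiple of
+	 * 16 bytes and a single packet; the bearer id must be in range.
+	 */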
+	if ((key_stream_mode && (req->data_len & 0xf || npkts > 1)) ||
+				(req->bearer >= QCE_OTA_MAX_BEARER))
+		return -EINVAL;
+
+	/*  write seg_cfg */
+	cfg = (CRYPTO_ENCR_ALG_F8 << CRYPTO_ENCR_ALG) | (1 << CRYPTO_FIRST) |
+				(1 << CRYPTO_LAST);
+	if (req->algorithm == QCE_OTA_ALGO_KASUMI)
+		cfg |= (CRYPTO_ENCR_KEY_SZ_UEA1 << CRYPTO_ENCR_KEY_SZ);
+	else
+		cfg |= (CRYPTO_ENCR_KEY_SZ_UEA2 << CRYPTO_ENCR_KEY_SZ);
+	if (key_stream_mode)
+		cfg |= 1 << CRYPTO_F8_KEYSTREAM_ENABLE;
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		cfg |= 1 << CRYPTO_F8_DIRECTION;
+	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
+
+	/* write seg_size   */
+	writel_relaxed(req->data_len, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	/* write 0 to auth_size, auth_offset */
+	writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+	/* write encr_seg_cfg seg_size, seg_offset */
+	writel_relaxed((((uint32_t) cipher_size) << CRYPTO_ENCR_SEG_SIZE) |
+			(cipher_offset & 0xffff),
+				pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write keys */
+	_byte_stream_to_net_words(ckey, &req->ckey[0], OTA_KEY_SIZE);
+	writel_relaxed(ckey[0], pce_dev->iobase + CRYPTO_DES_KEY0_REG);
+	writel_relaxed(ckey[1], pce_dev->iobase + CRYPTO_DES_KEY1_REG);
+	writel_relaxed(ckey[2], pce_dev->iobase + CRYPTO_DES_KEY2_REG);
+	writel_relaxed(ckey[3], pce_dev->iobase + CRYPTO_DES_KEY3_REG);
+
+	/* write cntr0_iv0 for countC */
+	writel_relaxed(req->count_c, pce_dev->iobase + CRYPTO_CNTR0_IV0_REG);
+
+	/* write cntr1_iv1 for nPkts, and bearer */
+	if (npkts == 1)
+		npkts = 0;
+	writel_relaxed(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
+			npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
+				pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);
+
+	/* issue go to crypto   */
+	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
+
+	/*
+	 * barrier to ensure previous instructions
+	 * (including GO) to CE finish before issue DMA transfer
+	 * request.
+	 */
+	mb();
+	return 0;
+};
+
+int qce_aead_req(void *handle, struct qce_req *q_req)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct aead_request *areq = (struct aead_request *) q_req->areq;
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	uint32_t ivsize = crypto_aead_ivsize(aead);
+	uint32_t totallen;
+	uint32_t pad_len;
+	uint32_t authsize = crypto_aead_authsize(aead);
+	int rc = 0;
+
+	q_req->ivsize = ivsize;
+	if (q_req->dir == QCE_ENCRYPT)
+		q_req->cryptlen = areq->cryptlen;
+	else
+		q_req->cryptlen = areq->cryptlen - authsize;
+
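+	/* The ADM moves data in 16-byte blocks, so pad the total transfer
+	 * up to a multiple of ADM_CE_BLOCK_SIZE.
+	 */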
+	totallen = q_req->cryptlen + ivsize + areq->assoclen;
+	pad_len = ALIGN(totallen, ADM_CE_BLOCK_SIZE) - totallen;
+
+	_chain_buffer_in_init(pce_dev);
+	_chain_buffer_out_init(pce_dev);
+
+	pce_dev->assoc_nents = 0;
+	pce_dev->phy_iv_in = 0;
+	pce_dev->src_nents = 0;
+	pce_dev->dst_nents = 0;
+
+	pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
+	dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+					 DMA_TO_DEVICE);
+	if (_chain_sg_buffer_in(pce_dev, areq->assoc, areq->assoclen) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	/* cipher iv for input */
+	pce_dev->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv,
+			ivsize, DMA_TO_DEVICE);
+	if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_iv_in, ivsize) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	/* for output, ignore associated data and cipher iv */
+	if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_out_ignore,
+						ivsize + areq->assoclen) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	/* cipher input       */
+	pce_dev->src_nents = count_sg(areq->src, q_req->cryptlen);
+	dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	if (_chain_sg_buffer_in(pce_dev, areq->src, q_req->cryptlen) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	/* cipher output      */
+	if (areq->src != areq->dst) {
+		pce_dev->dst_nents = count_sg(areq->dst, q_req->cryptlen);
+		dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+				DMA_FROM_DEVICE);
+	};
+	if (_chain_sg_buffer_out(pce_dev, areq->dst, q_req->cryptlen) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	/* pad data      */
+	if (pad_len) {
+		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+	}
+
+	/* finalize the ce_in and ce_out channels command lists */
+	_ce_in_final(pce_dev, 1, ALIGN(totallen, ADM_CE_BLOCK_SIZE));
+	_ce_out_final(pce_dev, 2, ALIGN(totallen, ADM_CE_BLOCK_SIZE));
+
+	/* set up crypto device */
+	rc = _ce_setup(pce_dev, q_req, totallen, ivsize + areq->assoclen);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to adm */
+	pce_dev->areq = q_req->areq;
+	pce_dev->qce_cb = q_req->qce_cb;
+
+	pce_dev->chan_ce_in_cmd->complete_func = _aead_ce_in_call_back;
+	pce_dev->chan_ce_out_cmd->complete_func = _aead_ce_out_call_back;
+
+	rc = _qce_start_dma(pce_dev, true, true);
+	if (rc == 0)
+		return 0;
+bad:
+	if (pce_dev->assoc_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+				DMA_TO_DEVICE);
+	}
+	if (pce_dev->phy_iv_in) {
+		dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
+				ivsize, DMA_TO_DEVICE);
+	}
+	if (pce_dev->src_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+								DMA_TO_DEVICE);
+	}
+	if (pce_dev->dst_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+				DMA_FROM_DEVICE);
+	}
+	return rc;
+}
+EXPORT_SYMBOL(qce_aead_req);
+
+int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
+{
+	int rc = 0;
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct ablkcipher_request *areq = (struct ablkcipher_request *)
+						c_req->areq;
+
+	uint32_t pad_len = ALIGN(areq->nbytes, ADM_CE_BLOCK_SIZE)
+						- areq->nbytes;
+
+	_chain_buffer_in_init(pce_dev);
+	_chain_buffer_out_init(pce_dev);
+
+	pce_dev->src_nents = 0;
+	pce_dev->dst_nents = 0;
+	/* cipher input       */
+	pce_dev->src_nents = count_sg(areq->src, areq->nbytes);
+
+	if (c_req->use_pmem != 1)
+		dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+								DMA_TO_DEVICE);
+	else
+		dma_map_pmem_sg(&c_req->pmem->src[0], pce_dev->src_nents,
+								areq->src);
+
+	if (_chain_sg_buffer_in(pce_dev, areq->src, areq->nbytes) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	/* cipher output      */
+	if (areq->src != areq->dst) {
+		pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
+		if (c_req->use_pmem != 1)
+			dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+							DMA_FROM_DEVICE);
+		else
+			dma_map_pmem_sg(&c_req->pmem->dst[0],
+					pce_dev->dst_nents, areq->dst);
+	}
+	if (_chain_sg_buffer_out(pce_dev, areq->dst, areq->nbytes) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	/* pad data      */
+	if (pad_len) {
+		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+	}
+
+	/* finalize the ce_in and ce_out channels command lists */
+	_ce_in_final(pce_dev, 1, areq->nbytes + pad_len);
+	_ce_out_final(pce_dev, 1, areq->nbytes + pad_len);
+
+#ifdef QCE_DEBUG
+	_ce_in_dump(pce_dev);
+	_ce_out_dump(pce_dev);
+#endif
+	/* set up crypto device */
+	rc = _ce_setup(pce_dev, c_req, areq->nbytes, 0);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to adm */
+	pce_dev->areq = areq;
+	pce_dev->qce_cb = c_req->qce_cb;
+	if (c_req->use_pmem == 1) {
+		pce_dev->chan_ce_in_cmd->complete_func =
+					_ablk_cipher_ce_in_call_back_pmem;
+		pce_dev->chan_ce_out_cmd->complete_func =
+					_ablk_cipher_ce_out_call_back_pmem;
+	} else {
+		pce_dev->chan_ce_in_cmd->complete_func =
+					_ablk_cipher_ce_in_call_back;
+		pce_dev->chan_ce_out_cmd->complete_func =
+					_ablk_cipher_ce_out_call_back;
+	}
+	rc = _qce_start_dma(pce_dev, true, true);
+
+	if (rc == 0)
+		return 0;
+bad:
+	if (c_req->use_pmem != 1) {
+		if (pce_dev->dst_nents) {
+			dma_unmap_sg(pce_dev->pdev, areq->dst,
+				pce_dev->dst_nents, DMA_FROM_DEVICE);
+		}
+		if (pce_dev->src_nents) {
+			dma_unmap_sg(pce_dev->pdev, areq->src,
+					pce_dev->src_nents,
+					(areq->src == areq->dst) ?
+						DMA_BIDIRECTIONAL :
+						DMA_TO_DEVICE);
+		}
+	}
+	return rc;
+}
+EXPORT_SYMBOL(qce_ablk_cipher_req);
+
+int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc;
+	uint32_t pad_len = ALIGN(sreq->size, ADM_CE_BLOCK_SIZE) - sreq->size;
+	struct ahash_request *areq = (struct ahash_request *)sreq->areq;
+
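+	/* hashing only consumes data, so just the ce_in channel is used */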
+	_chain_buffer_in_init(pce_dev);
+	pce_dev->src_nents = count_sg(sreq->src, sreq->size);
+	dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents,
+							DMA_TO_DEVICE);
+
+	if (_chain_sg_buffer_in(pce_dev, sreq->src, sreq->size) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	if (pad_len) {
+		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+	}
+	_ce_in_final(pce_dev, 2, sreq->size + pad_len);
+
+#ifdef QCE_DEBUG
+	_ce_in_dump(pce_dev);
+#endif
+
+	rc =  _sha_ce_setup(pce_dev, sreq);
+
+	if (rc < 0)
+		goto bad;
+
+	pce_dev->areq = areq;
+	pce_dev->qce_cb = sreq->qce_cb;
+	pce_dev->chan_ce_in_cmd->complete_func = _sha_ce_in_call_back;
+
+	rc =  _qce_start_dma(pce_dev, true, false);
+
+	if (rc == 0)
+		return 0;
+bad:
+	if (pce_dev->src_nents) {
+		dma_unmap_sg(pce_dev->pdev, sreq->src,
+				pce_dev->src_nents, DMA_TO_DEVICE);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(qce_process_sha_req);
+
+/*
+ * Crypto engine open function: allocate a device instance and claim its
+ * clock, register, DMA channel, and coherent memory resources.
+ */
+void *qce_open(struct platform_device *pdev, int *rc)
+{
+	struct qce_device *pce_dev;
+	struct resource *resource;
+	struct clk *ce_clk;
+
+	pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
+	if (!pce_dev) {
+		*rc = -ENOMEM;
+		dev_err(&pdev->dev, "Can not allocate memory\n");
+		return NULL;
+	}
+	pce_dev->pdev = &pdev->dev;
+	ce_clk = clk_get(pce_dev->pdev, "ce_clk");
+	if (IS_ERR(ce_clk)) {
+		*rc = PTR_ERR(ce_clk);
+		/* qce_close() would clk_put() this error value; free here */
+		kfree(pce_dev);
+		return NULL;
+	}
+	pce_dev->ce_clk = ce_clk;
+
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!resource) {
+		*rc = -ENXIO;
+		dev_err(pce_dev->pdev, "Missing MEM resource\n");
+		goto err;
+	}
+	pce_dev->phy_iobase = resource->start;
+	pce_dev->iobase = ioremap_nocache(resource->start,
+				resource->end - resource->start + 1);
+	if (!pce_dev->iobase) {
+		*rc = -ENOMEM;
+		dev_err(pce_dev->pdev, "Can not map io memory\n");
+		goto err;
+	}
+
+	pce_dev->chan_ce_in_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
+			GFP_KERNEL);
+	pce_dev->chan_ce_out_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
+			GFP_KERNEL);
+	if (pce_dev->chan_ce_in_cmd == NULL ||
+			pce_dev->chan_ce_out_cmd == NULL) {
+		dev_err(pce_dev->pdev, "Can not allocate memory\n");
+		*rc = -ENOMEM;
+		goto err;
+	}
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+					"crypto_channels");
+	if (!resource) {
+		*rc = -ENXIO;
+		dev_err(pce_dev->pdev, "Missing DMA channel resource\n");
+		goto err;
+	}
+	pce_dev->chan_ce_in = resource->start;
+	pce_dev->chan_ce_out = resource->end;
+	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+				"crypto_crci_in");
+	if (!resource) {
+		*rc = -ENXIO;
+		dev_err(pce_dev->pdev, "Missing DMA crci in resource\n");
+		goto err;
+	}
+	pce_dev->crci_in = resource->start;
+	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+				"crypto_crci_out");
+	if (!resource) {
+		*rc = -ENXIO;
+		dev_err(pce_dev->pdev, "Missing DMA crci out resource\n");
+		goto err;
+	}
+	pce_dev->crci_out = resource->start;
+	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+				"crypto_crci_hash");
+	if (!resource) {
+		*rc = -ENXIO;
+		dev_err(pce_dev->pdev, "Missing DMA crci hash resource\n");
+		goto err;
+	}
+	pce_dev->crci_hash = resource->start;
+	pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
+			PAGE_SIZE, &pce_dev->coh_pmem, GFP_KERNEL);
+
+	if (pce_dev->coh_vmem == NULL) {
+		*rc = -ENOMEM;
+		dev_err(pce_dev->pdev, "Can not allocate coherent memory.\n");
+		goto err;
+	}
+	_setup_cmd_template(pce_dev);
+
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+	*rc = clk_enable(pce_dev->ce_clk);
+	if (*rc)
+		goto err;
+
+	if (_init_ce_engine(pce_dev)) {
+		*rc = -ENXIO;
+		clk_disable(pce_dev->ce_clk);
+		goto err;
+	}
+	*rc = 0;
+	clk_disable(pce_dev->ce_clk);
+
+	pce_dev->err = 0;
+
+	return pce_dev;
+err:
+	if (pce_dev)
+		qce_close(pce_dev);
+	return NULL;
+}
+EXPORT_SYMBOL(qce_open);
+
+/*
+ * Crypto engine close function: release everything claimed by qce_open().
+ */
+int qce_close(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	if (handle == NULL)
+		return -ENODEV;
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+
+	if (pce_dev->coh_vmem)
+		dma_free_coherent(pce_dev->pdev, PAGE_SIZE, pce_dev->coh_vmem,
+				pce_dev->coh_pmem);
+	kfree(pce_dev->chan_ce_in_cmd);
+	kfree(pce_dev->chan_ce_out_cmd);
+
+	/* put the clock before freeing the structure that holds it */
+	clk_put(pce_dev->ce_clk);
+	kfree(handle);
+	return 0;
+}
+EXPORT_SYMBOL(qce_close);
+
+int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	if (ce_support == NULL)
+		return -EINVAL;
+
+	ce_support->sha1_hmac_20 = (pce_dev->hmac == 1);
+	ce_support->sha1_hmac = false;
+	ce_support->sha256_hmac = false;
+	ce_support->sha_hmac = false;
+	ce_support->cmac  = false;
+	ce_support->aes_key_192 = true;
+	ce_support->aes_xts  = false;
+	ce_support->aes_ccm  = false;
+	ce_support->ota = pce_dev->ota;
+	return 0;
+}
+EXPORT_SYMBOL(qce_hw_support);
+
+int qce_f8_req(void *handle, struct qce_f8_req *req,
+			void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	bool key_stream_mode;
+	dma_addr_t dst;
+	int rc;
+	uint32_t pad_len = ALIGN(req->data_len, ADM_CE_BLOCK_SIZE) -
+						req->data_len;
+
+	_chain_buffer_in_init(pce_dev);
+	_chain_buffer_out_init(pce_dev);
+
+	key_stream_mode = (req->data_in == NULL);
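+	/* in key stream mode there is no input data; only ce_out runs */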
+
+	/* F8 cipher input       */
+	if (key_stream_mode)
+		pce_dev->phy_ota_src = 0;
+	else {
+		pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev,
+					req->data_in, req->data_len,
+					(req->data_in == req->data_out) ?
+					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src,
+				req->data_len) < 0) {
+			pce_dev->phy_ota_dst = 0;
+			rc =  -ENOMEM;
+			goto bad;
+		}
+	}
+
+	/* F8 cipher output     */
+	if (req->data_in != req->data_out) {
+		dst = dma_map_single(pce_dev->pdev, req->data_out,
+				req->data_len, DMA_FROM_DEVICE);
+		pce_dev->phy_ota_dst = dst;
+	} else {
+		dst = pce_dev->phy_ota_src;
+		pce_dev->phy_ota_dst = 0;
+	}
+	if (_chain_pm_buffer_out(pce_dev, dst, req->data_len) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	pce_dev->ota_size = req->data_len;
+
+	/* pad data      */
+	if (pad_len) {
+		if (!key_stream_mode && _chain_pm_buffer_in(pce_dev,
+					pce_dev->phy_ce_pad, pad_len) < 0) {
+			rc =  -ENOMEM;
+			goto bad;
+		}
+		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc =  -ENOMEM;
+			goto bad;
+		}
+	}
+
+	/* finalize the ce_in and ce_out channels command lists */
+	if (!key_stream_mode)
+		_ce_in_final(pce_dev, 1, req->data_len + pad_len);
+	_ce_out_final(pce_dev, 1, req->data_len + pad_len);
+
+	/* set up crypto device */
+	rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0, req->data_len);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to adm */
+	pce_dev->areq = cookie;
+	pce_dev->qce_cb = qce_cb;
+
+	if (!key_stream_mode)
+		pce_dev->chan_ce_in_cmd->complete_func = _f8_ce_in_call_back;
+
+	pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back;
+
+	rc =  _qce_start_dma(pce_dev, !(key_stream_mode), true);
+	if (rc == 0)
+		return 0;
+bad:
+	if (pce_dev->phy_ota_dst != 0)
+		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst,
+				req->data_len, DMA_FROM_DEVICE);
+	if (pce_dev->phy_ota_src != 0)
+		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
+				req->data_len,
+				(req->data_in == req->data_out) ?
+					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	return rc;
+}
+EXPORT_SYMBOL(qce_f8_req);
+
+int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
+			void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	uint16_t num_pkt = mreq->num_pkt;
+	uint16_t cipher_start = mreq->cipher_start;
+	uint16_t cipher_size = mreq->cipher_size;
+	struct qce_f8_req *req = &mreq->qce_f8_req;
+	uint32_t total;
+	uint32_t pad_len;
+	dma_addr_t dst = 0;
+	int rc = 0;
+
+	total = num_pkt * req->data_len;
+	pad_len = ALIGN(total, ADM_CE_BLOCK_SIZE) - total;
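+	/* the num_pkt packets sit back to back in one contiguous buffer */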
+
+	_chain_buffer_in_init(pce_dev);
+	_chain_buffer_out_init(pce_dev);
+
+	/* F8 cipher input       */
+	pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev,
+				req->data_in, total,
+				(req->data_in == req->data_out) ?
+				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src,
+				total) < 0) {
+		pce_dev->phy_ota_dst = 0;
+		rc = -ENOMEM;
+		goto bad;
+	}
+	/* F8 cipher output      */
+	if (req->data_in != req->data_out) {
+		dst = dma_map_single(pce_dev->pdev, req->data_out, total,
+						DMA_FROM_DEVICE);
+		pce_dev->phy_ota_dst = dst;
+	} else {
+		dst = pce_dev->phy_ota_src;
+		pce_dev->phy_ota_dst = 0;
+	}
+	if (_chain_pm_buffer_out(pce_dev, dst, total) < 0) {
+		rc = -ENOMEM;
+		goto  bad;
+	}
+
+	pce_dev->ota_size = total;
+
+	/* pad data      */
+	if (pad_len) {
+		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+					pad_len) < 0) {
+			rc = -ENOMEM;
+			goto  bad;
+		}
+		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc = -ENOMEM;
+			goto  bad;
+		}
+	}
+
+	/* finalize the ce_in and ce_out channels command lists */
+	_ce_in_final(pce_dev, 1, total + pad_len);
+	_ce_out_final(pce_dev, 1, total + pad_len);
+
+
+	/* set up crypto device */
+	rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
+			cipher_size);
+	if (rc)
+		goto bad;
+
+	/* setup for callback, and issue command to adm */
+	pce_dev->areq = cookie;
+	pce_dev->qce_cb = qce_cb;
+
+	pce_dev->chan_ce_in_cmd->complete_func = _f8_ce_in_call_back;
+	pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back;
+
+	rc = _qce_start_dma(pce_dev, true, true);
+	if (rc == 0)
+		return 0;
+bad:
+	if (pce_dev->phy_ota_dst)
+		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, total,
+				DMA_FROM_DEVICE);
+	dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, total,
+				(req->data_in == req->data_out) ?
+				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	return rc;
+}
+EXPORT_SYMBOL(qce_f8_multi_pkt_req);
+
+int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
+			qce_comp_func_ptr_t qce_cb)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc;
+	uint32_t pad_len = ALIGN(req->msize, ADM_CE_BLOCK_SIZE) - req->msize;
+
+	pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
+			req->msize, DMA_TO_DEVICE);
+
+	_chain_buffer_in_init(pce_dev);
+	rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src, req->msize);
+	if (rc < 0) {
+		rc =  -ENOMEM;
+		goto bad;
+	}
+
+	pce_dev->ota_size = req->msize;
+	if (pad_len) {
+		rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+				pad_len);
+		if (rc < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+	}
+	_ce_in_final(pce_dev, 2, req->msize + pad_len);
+	rc = _ce_f9_setup(pce_dev, req);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to adm */
+	pce_dev->areq = cookie;
+	pce_dev->qce_cb = qce_cb;
+
+	pce_dev->chan_ce_in_cmd->complete_func = _f9_ce_in_call_back;
+
+	rc =  _qce_start_dma(pce_dev, true, false);
+	if (rc == 0)
+		return 0;
+bad:
+	dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
+				req->msize, DMA_TO_DEVICE);
+	return rc;
+}
+EXPORT_SYMBOL(qce_f9_req);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
+MODULE_DESCRIPTION("Crypto Engine driver");
+MODULE_VERSION("1.11");
+
diff --git a/drivers/crypto/msm/qce40.c b/drivers/crypto/msm/qce40.c
new file mode 100644
index 0000000..7724d67
--- /dev/null
+++ b/drivers/crypto/msm/qce40.c
@@ -0,0 +1,2038 @@
+/* Qualcomm Crypto Engine driver.
+ *
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <mach/dma.h>
+#include <mach/clk.h>
+#include "inc/qce.h"
+#include "inc/qcedev.h"
+#include "inc/qcryptohw_40.h"
+
+/* ADM definitions */
+#define LI_SG_CMD  (1 << 31)    /* last index in the scatter gather cmd */
+#define SRC_INDEX_SG_CMD(index) (((index) & 0x3fff) << 16)
+#define DST_INDEX_SG_CMD(index) ((index) & 0x3fff)
+#define ADM_DESC_LAST  (1 << 31)
+
+/* Data xfer between DM and CE in blocks of 16 bytes */
+#define ADM_CE_BLOCK_SIZE  16
+
+#define ADM_DESC_LENGTH_MASK 0xffff
+#define ADM_DESC_LENGTH(x)  ((x) & ADM_DESC_LENGTH_MASK)
+
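+/*
+ * ADM scatter/gather descriptor: a bus address plus a length word whose
+ * low 16 bits hold the byte count and whose bit 31 (ADM_DESC_LAST)
+ * marks the final descriptor of a list.
+ */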
+struct dmov_desc {
+	uint32_t addr;
+	uint32_t len;
+};
+
+#define ADM_STATUS_OK 0x80000002
+
+/* Misc definitions */
+
+/* QCE max number of descriptor in a descriptor list */
+#define QCE_MAX_NUM_DESC    128
+
+/* State of DM channel */
+enum qce_chan_st_enum {
+	QCE_CHAN_STATE_IDLE = 0,
+	QCE_CHAN_STATE_IN_PROG = 1,
+	QCE_CHAN_STATE_COMP = 2,
+	QCE_CHAN_STATE_LAST
+};
+
+/*
+ * CE HW device structure.
+ * Each engine has an instance of the structure.
+ * Each engine can only handle one crypto operation at a time; it is up to
+ * the software above to serialize operations on an engine.
+ */
+struct qce_device {
+	struct device *pdev;        /* Handle to platform_device structure */
+	unsigned char *coh_vmem;    /* Allocated coherent virtual memory */
+	dma_addr_t coh_pmem;	    /* Allocated coherent physical memory */
+	void __iomem *iobase;	    /* Virtual io base of CE HW  */
+	unsigned int phy_iobase;    /* Physical io base of CE HW    */
+	struct clk *ce_core_clk;	    /* Handle to CE clk */
+	struct clk *ce_clk;	    /* Handle to CE clk */
+	unsigned int crci_in;	      /* CRCI for CE DM IN Channel   */
+	unsigned int crci_out;	      /* CRCI for CE DM OUT Channel   */
+	unsigned int chan_ce_in;      /* ADM channel used for CE input
+					* and auth result if authentication
+					* only operation. */
+	unsigned int chan_ce_out;     /* ADM channel used for CE output,
+					and icv for esp */
+	unsigned int *cmd_pointer_list_ce_in;
+	dma_addr_t  phy_cmd_pointer_list_ce_in;
+
+	unsigned int *cmd_pointer_list_ce_out;
+	dma_addr_t  phy_cmd_pointer_list_ce_out;
+
+	unsigned char *cmd_list_ce_in;
+	dma_addr_t  phy_cmd_list_ce_in;
+
+	unsigned char *cmd_list_ce_out;
+	dma_addr_t  phy_cmd_list_ce_out;
+
+	struct dmov_desc *ce_out_src_desc;
+	dma_addr_t  phy_ce_out_src_desc;
+
+	struct dmov_desc *ce_out_dst_desc;
+	dma_addr_t  phy_ce_out_dst_desc;
+
+	struct dmov_desc *ce_in_src_desc;
+	dma_addr_t  phy_ce_in_src_desc;
+
+	struct dmov_desc *ce_in_dst_desc;
+	dma_addr_t  phy_ce_in_dst_desc;
+
+	unsigned char *ce_out_ignore;
+	dma_addr_t phy_ce_out_ignore;
+
+	unsigned char *ce_pad;
+	dma_addr_t phy_ce_pad;
+
+	struct msm_dmov_cmd  *chan_ce_in_cmd;
+	struct msm_dmov_cmd  *chan_ce_out_cmd;
+
+	uint32_t ce_out_ignore_size;
+
+	int ce_out_dst_desc_index;
+	int ce_in_dst_desc_index;
+
+	int ce_out_src_desc_index;
+	int ce_in_src_desc_index;
+
+	enum qce_chan_st_enum chan_ce_in_state;		/* chan ce_in state */
+	enum qce_chan_st_enum chan_ce_out_state;	/* chan ce_out state */
+
+	int chan_ce_in_status;		/* chan ce_in status      */
+	int chan_ce_out_status;		/* chan ce_out status */
+
+	unsigned char *dig_result;
+	dma_addr_t phy_dig_result;
+
+	/* cached aes key */
+	uint32_t cipher_key[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)];
+
+	uint32_t cipher_key_size;	/* cached aes key size in bytes */
+	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */
+
+	int assoc_nents;
+	int ivsize;
+	int authsize;
+	int src_nents;
+	int dst_nents;
+
+	void *areq;
+	enum qce_cipher_mode_enum mode;
+
+	dma_addr_t phy_iv_in;
+};
+
+/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
+static uint32_t  _std_init_vector_sha1[] =   {
+	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
+};
+/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint32_t _std_init_vector_sha256[] = {
+	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
+	0x510E527F, 0x9B05688C,	0x1F83D9AB, 0x5BE0CD19
+};
+
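+/*
+ * Pack a byte stream into big-endian 32-bit words; e.g. the bytes
+ * {0x01, 0x02, 0x03, 0x04} become the word 0x01020304. A trailing
+ * partial word is left-justified and zero-padded.
+ */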
+static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned n;
+
+	n = len / sizeof(uint32_t);
+	for (; n > 0; n--) {
+		*iv =  ((*b << 24)      & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     |
+				(*(b+3)          & 0xff);
+		b += sizeof(uint32_t);
+		iv++;
+	}
+
+	n = len %  sizeof(uint32_t);
+	if (n == 3) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     ;
+	} else if (n == 2) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   ;
+	} else if (n == 1) {
+		*iv = ((*b << 24) & 0xff000000) ;
+	}
+}
+
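+/*
+ * Byte-reverse a (possibly short) IV into a full 16-byte AES block
+ * before packing it into words; used for the XTS tweak below.
+ */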
+static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned i, j;
+	unsigned char swap_iv[AES_IV_LENGTH];
+
+	memset(swap_iv, 0, AES_IV_LENGTH);
+	for (i = (AES_IV_LENGTH-len), j = len-1;  i < AES_IV_LENGTH; i++, j--)
+		swap_iv[i] = b[j];
+	_byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
+}
+
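+/* the inverse: unpack big-endian words back into a byte stream */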
+static void _net_words_to_byte_stream(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned n = len  / sizeof(uint32_t);
+
+	for (; n > 0; n--) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 8)    & 0xff);
+		*b++ = (unsigned char) (*iv           & 0xff);
+		iv++;
+	}
+	n = len % sizeof(uint32_t);
+	if (n == 3) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 8)    & 0xff);
+	} else if (n == 2) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 16)   & 0xff);
+	} else if (n == 1) {
+		*b =   (unsigned char) ((*iv >> 24)   & 0xff);
+	}
+}
+
+static int count_sg(struct scatterlist *sg, int nbytes)
+{
+	int i;
+
+	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
+		nbytes -= sg->length;
+	return i;
+}
+
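+/*
+ * pmem buffers already carry physical addresses, so each sg entry's
+ * dma_address is taken straight from the buf_info offset rather than
+ * going through dma_map_sg().
+ */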
+static int dma_map_pmem_sg(struct buf_info *pmem, unsigned entries,
+						struct scatterlist *sg)
+{
+	int i;
+	for (i = 0; i < entries; i++) {
+
+		sg->dma_address = (dma_addr_t)pmem->offset;
+		sg++;
+		pmem++;
+	}
+	return 0;
+}
+
+static int _probe_ce_engine(struct qce_device *pce_dev)
+{
+	unsigned int val;
+	unsigned int rev;
+
+	val = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
+	if (((val & 0xfffffff) != 0x0000042) &&
+			((val & 0xfffffff) != 0x0000040)) {
+		dev_err(pce_dev->pdev,
+				"Unknown Qualcomm crypto device at 0x%x 0x%x\n",
+				pce_dev->phy_iobase, val);
+		return -EIO;
+	}
+	rev = (val & CRYPTO_CORE_REV_MASK);
+	if (rev == 0x42) {
+		dev_info(pce_dev->pdev,
+				"Qualcomm Crypto 4.2 device found at 0x%x\n",
+				pce_dev->phy_iobase);
+	} else if (rev == 0x40) {
+		dev_info(pce_dev->pdev,
+				"Qualcomm Crypto 4.0 device found at 0x%x\n",
+				pce_dev->phy_iobase);
+	}
+
+	dev_info(pce_dev->pdev,
+			"IO base 0x%x, ce_in channel %d, "
+			"ce_out channel %d, "
+			"crci_in %d, crci_out %d\n",
+			(unsigned int) pce_dev->iobase,
+			pce_dev->chan_ce_in, pce_dev->chan_ce_out,
+			pce_dev->crci_in, pce_dev->crci_out);
+
+	pce_dev->cipher_key_size = 0;
+
+	return 0;
+}
+
+static int _init_ce_engine(struct qce_device *pce_dev)
+{
+	unsigned int val;
+
+	/* Reset ce */
+	clk_reset(pce_dev->ce_core_clk, CLK_RESET_ASSERT);
+	clk_reset(pce_dev->ce_core_clk, CLK_RESET_DEASSERT);
+	/*
+	 * Ensure the previous writes that toggle the CLK reset lines
+	 * have completed.
+	 */
+	dsb();
+	/* configure ce */
+	val = (1 << CRYPTO_MASK_DOUT_INTR) | (1 << CRYPTO_MASK_DIN_INTR) |
+			(1 << CRYPTO_MASK_OP_DONE_INTR) |
+					(1 << CRYPTO_MASK_ERR_INTR);
+	writel_relaxed(val, pce_dev->iobase + CRYPTO_CONFIG_REG);
+	/*
+	 * Ensure the previous write to the config register has completed.
+	 */
+	dsb();
+	val = readl_relaxed(pce_dev->iobase + CRYPTO_CONFIG_REG);
+	if (!val) {
+		dev_err(pce_dev->pdev,
+				"unknown Qualcomm crypto device at 0x%x\n",
+				pce_dev->phy_iobase);
+		return -EIO;
+	}
+	if (_probe_ce_engine(pce_dev) < 0)
+		return -EIO;
+	return 0;
+}
+
+static int _ce_setup_hash(struct qce_device *pce_dev, struct qce_sha_req *sreq)
+{
+	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+	uint32_t diglen;
+	int i;
+	uint32_t auth_cfg = 0;
+	bool sha1 = false;
+
+	if (sreq->alg ==  QCE_HASH_AES_CMAC) {
+		uint32_t authkey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+		uint32_t authklen32 = sreq->authklen/(sizeof(uint32_t));
+		/* Clear auth_ivn, auth_keyn registers  */
+		for (i = 0; i < 16; i++) {
+			writel_relaxed(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t))));
+			writel_relaxed(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t))));
+		}
+		/* write auth_bytecnt 0/1/2/3, start with 0 */
+		for (i = 0; i < 4; i++)
+			writel_relaxed(0, pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT0_REG +
+						i * sizeof(uint32_t));
+
+		_byte_stream_to_net_words(authkey32, sreq->authkey,
+						sreq->authklen);
+		for (i = 0; i < authklen32; i++)
+			writel_relaxed(authkey32[i], pce_dev->iobase +
+				CRYPTO_AUTH_KEY0_REG + (i * sizeof(uint32_t)));
+		/*
+		 * write seg_cfg
+		 */
+		auth_cfg |= (1 << CRYPTO_LAST);
+		auth_cfg |= (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE);
+		auth_cfg |= (CRYPTO_AUTH_SIZE_ENUM_16_BYTES <<
+							CRYPTO_AUTH_SIZE);
+		auth_cfg |= CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG;
+
+		switch (sreq->authklen) {
+		case AES128_KEY_SIZE:
+			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES128 <<
+						CRYPTO_AUTH_KEY_SIZE);
+			break;
+		case AES256_KEY_SIZE:
+			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES256 <<
+					CRYPTO_AUTH_KEY_SIZE);
+			break;
+		default:
+			break;
+		}
+
+		goto go_proc;
+	}
+
+	/* if not the last, the size has to be on the block boundary */
+	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
+		return -EIO;
+
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+	case QCE_HASH_SHA1_HMAC:
+		diglen = SHA1_DIGEST_SIZE;
+		sha1 = true;
+		break;
+	case QCE_HASH_SHA256:
+	case QCE_HASH_SHA256_HMAC:
+		diglen = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
+				(sreq->alg == QCE_HASH_SHA256_HMAC)) {
+		uint32_t hmackey[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
+		uint32_t hmacklen = sreq->authklen/(sizeof(uint32_t));
+
+		_byte_stream_to_net_words(hmackey, sreq->authkey,
+						sreq->authklen);
+		/* write hmac key */
+		for (i = 0; i < hmacklen; i++)
+			writel_relaxed(hmackey[i], pce_dev->iobase +
+				CRYPTO_AUTH_KEY0_REG + (i * sizeof(uint32_t)));
+
+		auth_cfg |= (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE);
+	} else {
+		auth_cfg |= (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE);
+	}
+
+	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
+
+	if (sreq->first_blk) {
+		if (sha1) {
+			for (i = 0; i < 5; i++)
+				auth32[i] = _std_init_vector_sha1[i];
+		} else {
+			for (i = 0; i < 8; i++)
+				auth32[i] = _std_init_vector_sha256[i];
+		}
+	} else {
+		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
+	}
+
+	for (i = 0; i < 5; i++)
+		writel_relaxed(auth32[i], (pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t))));
+
+	if ((sreq->alg == QCE_HASH_SHA256) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
+		writel_relaxed(auth32[5], pce_dev->iobase +
+							CRYPTO_AUTH_IV5_REG);
+		writel_relaxed(auth32[6], pce_dev->iobase +
+							CRYPTO_AUTH_IV6_REG);
+		writel_relaxed(auth32[7], pce_dev->iobase +
+							CRYPTO_AUTH_IV7_REG);
+	}
+
+	/* write auth_bytecnt 0/1/2/3 with the byte count so far */
+	for (i = 0; i < 4; i++)
+		writel_relaxed(sreq->auth_data[i], (pce_dev->iobase +
+			(CRYPTO_AUTH_BYTECNT0_REG + i * sizeof(uint32_t))));
+
+	/* write seg_cfg */
+	if (sha1)
+		auth_cfg |= (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE);
+	else
+		auth_cfg |= (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE);
+
+	if (sreq->last_blk)
+		auth_cfg |= 1 << CRYPTO_LAST;
+
+	auth_cfg |= CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG;
+
+go_proc:
+	auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	 /* write seg_cfg */
+	writel_relaxed(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+	/* write seg_size   */
+	writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	/* write auth_seg_size */
+	writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write auth_seg_start   */
+	writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+	/*
+	 * Ensure the previous writes to the AUTH registers have completed
+	 * before accessing a register outside the same 1K range.
+	 */
+	dsb();
+
+	writel_relaxed(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+	/*
+	 * Ensure the previous writes to the CE registers have completed
+	 * before writing to the GO register.
+	 */
+	dsb();
+	/* issue go to crypto   */
+	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
+	/*
+	 * Ensure the write to the GO register has completed before
+	 * issuing a DMA transfer request.
+	 */
+	dsb();
+
+	return 0;
+}
+
+static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
+		uint32_t totallen_in, uint32_t coffset)
+{
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+			0, 0, 0, 0};
+	uint32_t enck_size_in_word = creq->encklen / sizeof(uint32_t);
+	int aes_key_chg;
+	int i;
+	uint32_t encr_cfg = 0;
+	uint32_t ivsize = creq->ivsize;
+
+	if (creq->mode ==  QCE_MODE_XTS)
+		_byte_stream_to_net_words(enckey32, creq->enckey,
+						creq->encklen/2);
+	else
+		_byte_stream_to_net_words(enckey32, creq->enckey,
+							creq->encklen);
+
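+	/* for AES-CCM the cipher key doubles as the CBC-MAC auth key */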
+	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
+		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
+		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
+		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
+		uint32_t auth_cfg = 0;
+
+		/* Clear auth_ivn, auth_keyn registers  */
+		for (i = 0; i < 16; i++) {
+			writel_relaxed(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+			writel_relaxed(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
+		}
+		/* write auth_bytecnt 0/1/2/3, start with 0 */
+		for (i = 0; i < 4; i++)
+			writel_relaxed(0, pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT0_REG +
+						i * sizeof(uint32_t));
+		/* write auth key */
+		for (i = 0; i < authklen32; i++)
+			writel_relaxed(enckey32[i], pce_dev->iobase +
+				CRYPTO_AUTH_KEY0_REG + (i*sizeof(uint32_t)));
+
+		/* write nonce */
+		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
+		for (i = 0; i < noncelen32; i++)
+			writel_relaxed(nonce32[i], pce_dev->iobase +
+				CRYPTO_AUTH_INFO_NONCE0_REG +
+					(i*sizeof(uint32_t)));
+
+		auth_cfg |= (noncelen32 << CRYPTO_AUTH_NONCE_NUM_WORDS);
+		auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+		auth_cfg |= (1 << CRYPTO_LAST);
+		if (creq->dir == QCE_ENCRYPT)
+			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		else
+			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+		auth_cfg |= (((creq->authsize >> 1) - 2) << CRYPTO_AUTH_SIZE);
+		auth_cfg |= (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE);
+		if (creq->authklen ==  AES128_KEY_SIZE)
+			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES128 <<
+						CRYPTO_AUTH_KEY_SIZE);
+		else {
+			if (creq->authklen ==  AES256_KEY_SIZE)
+				auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES256 <<
+							CRYPTO_AUTH_KEY_SIZE);
+		}
+		auth_cfg |= (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG);
+		writel_relaxed(auth_cfg, pce_dev->iobase +
+						CRYPTO_AUTH_SEG_CFG_REG);
+		if (creq->dir == QCE_ENCRYPT)
+			writel_relaxed(totallen_in, pce_dev->iobase +
+						CRYPTO_AUTH_SEG_SIZE_REG);
+		else
+			writel_relaxed((totallen_in - creq->authsize),
+				pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+	} else {
+		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+	}
+	/*
+	 * Ensure the previous writes to the AUTH registers have completed
+	 * before accessing a register outside the same 1K range.
+	 */
+	dsb();
+
+	switch (creq->mode) {
+	case QCE_MODE_ECB:
+		encr_cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+		break;
+
+	case QCE_MODE_CBC:
+		encr_cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+		break;
+
+	case QCE_MODE_XTS:
+		encr_cfg |= (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
+		break;
+
+	case QCE_MODE_CCM:
+		encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE);
+		break;
+
+	case QCE_MODE_CTR:
+	default:
+		encr_cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+		break;
+	}
+	pce_dev->mode = creq->mode;
+
+	switch (creq->alg) {
+	case CIPHER_ALG_DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			writel_relaxed(enciv32[0], pce_dev->iobase +
+						CRYPTO_CNTR0_IV0_REG);
+			writel_relaxed(enciv32[1], pce_dev->iobase +
+						CRYPTO_CNTR1_IV1_REG);
+		}
+		writel_relaxed(enckey32[0], pce_dev->iobase +
+							CRYPTO_ENCR_KEY0_REG);
+		writel_relaxed(enckey32[1], pce_dev->iobase +
+							CRYPTO_ENCR_KEY1_REG);
+		encr_cfg |= ((CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ)  |
+				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
+		break;
+
+	case CIPHER_ALG_3DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			writel_relaxed(enciv32[0], pce_dev->iobase +
+						CRYPTO_CNTR0_IV0_REG);
+			writel_relaxed(enciv32[1], pce_dev->iobase +
+						CRYPTO_CNTR1_IV1_REG);
+		}
+		/* write all six 3DES key words, not just word 0 */
+		for (i = 0; i < 6; i++)
+			writel_relaxed(enckey32[i], (pce_dev->iobase +
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))));
+
+		encr_cfg |= ((CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ)  |
+				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
+		break;
+
+	case CIPHER_ALG_AES:
+	default:
+		if (creq->mode ==  QCE_MODE_XTS) {
+			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
+					= {0, 0, 0, 0, 0, 0, 0, 0};
+			uint32_t xtsklen =
+					creq->encklen/(2 * sizeof(uint32_t));
+
+			_byte_stream_to_net_words(xtskey32, (creq->enckey +
+					creq->encklen/2), creq->encklen/2);
+			for (i = 0; i < xtsklen; i++)
+				writel_relaxed(xtskey32[i], pce_dev->iobase +
+					CRYPTO_ENCR_XTS_KEY0_REG +
+					(i * sizeof(uint32_t)));
+
+			writel_relaxed(creq->cryptlen,
+					pce_dev->iobase +
+					CRYPTO_ENCR_XTS_DU_SIZE_REG);
+		}
+		if (creq->mode !=  QCE_MODE_ECB) {
+			if (creq->mode ==  QCE_MODE_XTS)
+				_byte_stream_swap_to_net_words(enciv32,
+							creq->iv, ivsize);
+			else
+				_byte_stream_to_net_words(enciv32, creq->iv,
+								ivsize);
+			for (i = 0; i <= 3; i++)
+				writel_relaxed(enciv32[i], pce_dev->iobase +
+							CRYPTO_CNTR0_IV0_REG +
+							(i * sizeof(uint32_t)));
+		}
+		/* set number of counter bits */
+		writel_relaxed(0xffffffff, pce_dev->iobase +
+							CRYPTO_CNTR_MASK_REG);
+
+		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+			encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+					CRYPTO_ENCR_KEY_SZ);
+			encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
+		} else {
+			uint32_t key_size;
+
+			if (creq->mode == QCE_MODE_XTS) {
+				key_size = creq->encklen/2;
+				enck_size_in_word = key_size/sizeof(uint32_t);
+			} else {
+				key_size = creq->encklen;
+			}
+
+			switch (key_size) {
+			case AES128_KEY_SIZE:
+				encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+							CRYPTO_ENCR_KEY_SZ);
+				break;
+			case AES256_KEY_SIZE:
+			default:
+				encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES256 <<
+							CRYPTO_ENCR_KEY_SZ);
+
+				/* check for null key; if null, use the HW key */
+				for (i = 0; i < enck_size_in_word; i++) {
+					if (enckey32[i] != 0)
+						break;
+				}
+				if (i == enck_size_in_word)
+					encr_cfg |= 1 << CRYPTO_USE_HW_KEY;
+				break;
+			} /* end of switch (key_size) */
+
+			encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
+			if (pce_dev->cipher_key_size !=  creq->encklen)
+				aes_key_chg = 1;
+			else {
+				for (i = 0; i < enck_size_in_word; i++) {
+					if (enckey32[i]
+						!= pce_dev->cipher_key[i])
+						break;
+				}
+				aes_key_chg = (i == enck_size_in_word) ? 0 : 1;
+			}
+
+			if (aes_key_chg) {
+				for (i = 0; i < enck_size_in_word; i++)
+					writel_relaxed(enckey32[i],
+							pce_dev->iobase +
+							CRYPTO_ENCR_KEY0_REG +
+							(i * sizeof(uint32_t)));
+				pce_dev->cipher_key_size = creq->encklen;
+				for (i = 0; i < enck_size_in_word; i++)
+					pce_dev->cipher_key[i] = enckey32[i];
+			} /* end of if (aes_key_chg) */
+		} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+		break;
+	} /* end of switch (creq->alg) */
+
+	/* set the encode (encrypt vs decrypt) direction bit */
+	encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+
+	/* write encr seg cfg */
+	writel_relaxed(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write encr seg size */
+	if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
+		writel_relaxed((creq->cryptlen + creq->authsize),
+				pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+	else
+		writel_relaxed(creq->cryptlen,
+				pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+	/* write encr seg start */
+	writel_relaxed((coffset & 0xffff),
+			pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG);
+	/* write seg size  */
+	writel_relaxed(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+	/*
+	 * Ensure the previous writes to the CE registers have completed
+	 * before writing to the GO register.
+	 */
+	dsb();
+	/* issue go to crypto   */
+	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
+	/*
+	 * Ensure the write to the GO register has completed before
+	 * issuing a DMA transfer request.
+	 */
+	dsb();
+	return 0;
+}
+
+static int _aead_complete(struct qce_device *pce_dev)
+{
+	struct aead_request *areq;
+	int i;
+	uint32_t ivsize;
+	uint32_t iv_out[4];
+	unsigned char iv[4 * sizeof(uint32_t)];
+
+	areq = (struct aead_request *) pce_dev->areq;
+	ivsize = pce_dev->ivsize;
+
+	if (areq->src != areq->dst) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+					DMA_FROM_DEVICE);
+	}
+	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+
+	if (pce_dev->mode != QCE_MODE_CCM)
+		dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
+				ivsize, DMA_TO_DEVICE);
+	dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+			DMA_TO_DEVICE);
+
+	/* get iv out */
+	if ((pce_dev->mode == QCE_MODE_ECB) ||
+					(pce_dev->mode == QCE_MODE_CCM)) {
+		if (pce_dev->mode == QCE_MODE_CCM) {
+			int result;
+			result = readl_relaxed(pce_dev->iobase +
+							CRYPTO_STATUS_REG);
+			result &= (1 << CRYPTO_MAC_FAILED);
+			result |= (pce_dev->chan_ce_in_status |
+						pce_dev->chan_ce_out_status);
+			dsb();
+			pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
+								result);
+		} else {
+			pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
+					pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+		}
+	} else {
+		for (i = 0; i < 4; i++)
+			iv_out[i] = readl_relaxed(pce_dev->iobase +
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));
+
+		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
+		pce_dev->qce_cb(areq, pce_dev->dig_result, iv,
+				pce_dev->chan_ce_in_status |
+				pce_dev->chan_ce_out_status);
+	}
+	return 0;
+}
+
+static void _sha_complete(struct qce_device *pce_dev)
+{
+
+	struct ahash_request *areq;
+	uint32_t auth_data[4];
+	uint32_t digest[8];
+	int i;
+
+	areq = (struct ahash_request *) pce_dev->areq;
+	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+				DMA_TO_DEVICE);
+
+	for (i = 0; i < 4; i++)
+		auth_data[i] = readl_relaxed(pce_dev->iobase +
+				(CRYPTO_AUTH_BYTECNT0_REG +
+					i * sizeof(uint32_t)));
+
+	for (i = 0; i < 8; i++)
+		digest[i] = readl_relaxed(pce_dev->iobase +
+			CRYPTO_AUTH_IV0_REG + (i * sizeof(uint32_t)));
+
+	_net_words_to_byte_stream(digest, pce_dev->dig_result,
+						SHA256_DIGEST_SIZE);
+
+	pce_dev->qce_cb(areq,  pce_dev->dig_result, (unsigned char *)auth_data,
+				pce_dev->chan_ce_in_status);
+}
+
+static int _ablk_cipher_complete(struct qce_device *pce_dev)
+{
+	struct ablkcipher_request *areq;
+	uint32_t iv_out[4];
+	unsigned char iv[4 * sizeof(uint32_t)];
+
+	areq = (struct ablkcipher_request *) pce_dev->areq;
+
+	if (areq->src != areq->dst) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst,
+			pce_dev->dst_nents, DMA_FROM_DEVICE);
+	}
+	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+						DMA_TO_DEVICE);
+	/* get iv out */
+	if (pce_dev->mode == QCE_MODE_ECB) {
+		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+	} else {
+		int i;
+
+		for (i = 0; i < 4; i++)
+			iv_out[i] = readl_relaxed(pce_dev->iobase +
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));
+
+		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
+		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+	}
+
+	return 0;
+}
+
+static int _ablk_cipher_use_pmem_complete(struct qce_device *pce_dev)
+{
+	struct ablkcipher_request *areq;
+	uint32_t iv_out[4];
+	unsigned char iv[4 * sizeof(uint32_t)];
+
+	areq = (struct ablkcipher_request *) pce_dev->areq;
+
+	/* get iv out */
+	if (pce_dev->mode == QCE_MODE_ECB) {
+		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+	} else {
+		int i;
+
+		for (i = 0; i < 4; i++)
+			iv_out[i] = readl_relaxed(pce_dev->iobase +
+				CRYPTO_CNTR0_IV0_REG + (i * sizeof(uint32_t)));
+
+		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
+		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
+					pce_dev->chan_ce_out_status);
+	}
+
+	return 0;
+}
+
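+/*
+ * An ADM descriptor moves at most 0x8000 bytes, so a longer buffer is
+ * split across consecutive descriptors; *index advances for each extra
+ * entry and the split fails once QCE_MAX_NUM_DESC is exhausted.
+ */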
+static int qce_split_and_insert_dm_desc(struct dmov_desc *pdesc,
+			unsigned int plen, unsigned int paddr, int *index)
+{
+	while (plen > 0x8000) {
+		pdesc->len = 0x8000;
+		if (paddr > 0) {
+			pdesc->addr = paddr;
+			paddr += 0x8000;
+		}
+		plen -= pdesc->len;
+		if (plen > 0) {
+			*index = (*index) + 1;
+			if ((*index) >= QCE_MAX_NUM_DESC)
+				return -ENOMEM;
+			pdesc++;
+		}
+	}
+	if ((plen > 0) && (plen <= 0x8000)) {
+		pdesc->len = plen;
+		if (paddr > 0)
+			pdesc->addr = paddr;
+	}
+
+	return 0;
+}
+
+static int _chain_sg_buffer_in(struct qce_device *pce_dev,
+		struct scatterlist *sg, unsigned int nbytes)
+{
+	unsigned int len;
+	unsigned int dlen;
+	struct dmov_desc *pdesc;
+
+	pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
+	if (nbytes > 0x8000)
+		qce_split_and_insert_dm_desc(pdesc, nbytes, 0,
+				&pce_dev->ce_in_dst_desc_index);
+	else
+		pdesc->len = nbytes;
+
+	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
+	/*
+	 * Two consecutive chunks may be handled by the same
+	 * buffer descriptor.
+	 */
+	while (nbytes > 0) {
+		len = min(nbytes, sg_dma_len(sg));
+		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+		nbytes -= len;
+		if (dlen == 0) {
+			pdesc->addr  = sg_dma_address(sg);
+			pdesc->len = len;
+			if (pdesc->len > 0x8000)
+				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
+						sg_dma_address(sg),
+						&pce_dev->ce_in_src_desc_index);
+		} else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
+			pdesc->len  = dlen + len;
+			if (pdesc->len > 0x8000)
+				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
+						pdesc->addr,
+						&pce_dev->ce_in_src_desc_index);
+		} else {
+			pce_dev->ce_in_src_desc_index++;
+			if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
+				return -ENOMEM;
+			pdesc++;
+			pdesc->len = len;
+			pdesc->addr = sg_dma_address(sg);
+			if (pdesc->len > 0x8000)
+				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
+						sg_dma_address(sg),
+						&pce_dev->ce_in_src_desc_index);
+		}
+		if (nbytes > 0)
+			sg = sg_next(sg);
+	}
+	return 0;
+}
+
+static int _chain_pm_buffer_in(struct qce_device *pce_dev,
+		unsigned int pmem, unsigned int nbytes)
+{
+	unsigned int dlen;
+	struct dmov_desc *pdesc;
+
+	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
+	dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+	if (dlen == 0) {
+		pdesc->addr  = pmem;
+		pdesc->len = nbytes;
+	} else if (pmem == (pdesc->addr + dlen)) {
+		pdesc->len  = dlen + nbytes;
+	} else {
+		pce_dev->ce_in_src_desc_index++;
+		if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
+			return -ENOMEM;
+		pdesc++;
+		pdesc->len = nbytes;
+		pdesc->addr = pmem;
+	}
+	pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
+	pdesc->len += nbytes;
+
+	return 0;
+}
+
+static void _chain_buffer_in_init(struct qce_device *pce_dev)
+{
+	struct dmov_desc *pdesc;
+
+	pce_dev->ce_in_src_desc_index = 0;
+	pce_dev->ce_in_dst_desc_index = 0;
+	pdesc = pce_dev->ce_in_src_desc;
+	pdesc->len = 0;
+}
+
+static void _ce_in_final(struct qce_device *pce_dev, unsigned total)
+{
+	struct dmov_desc *pdesc;
+	dmov_sg *pcmd;
+
+	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
+	pdesc->len |= ADM_DESC_LAST;
+	pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
+	pdesc->len |= ADM_DESC_LAST;
+
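+	/* flag the scatter/gather command as the last in the list */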
+	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
+	pcmd->cmd |= CMD_LC;
+}
+
+#ifdef QCE_DEBUG
+static void _ce_in_dump(struct qce_device *pce_dev)
+{
+	int i;
+	struct dmov_desc *pdesc;
+
+	dev_info(pce_dev->pdev, "_ce_in_dump: src\n");
+	for (i = 0; i <= pce_dev->ce_in_src_desc_index; i++) {
+		pdesc = pce_dev->ce_in_src_desc + i;
+		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
+				pdesc->len);
+	}
+	dev_info(pce_dev->pdev, "_ce_in_dump: dst\n");
+	for (i = 0; i <= pce_dev->ce_in_dst_desc_index; i++) {
+		pdesc = pce_dev->ce_in_dst_desc + i;
+		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
+				pdesc->len);
+	}
+}
+
+static void _ce_out_dump(struct qce_device *pce_dev)
+{
+	int i;
+	struct dmov_desc *pdesc;
+
+	dev_info(pce_dev->pdev, "_ce_out_dump: src\n");
+	for (i = 0; i <= pce_dev->ce_out_src_desc_index; i++) {
+		pdesc = pce_dev->ce_out_src_desc + i;
+		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
+				pdesc->len);
+	}
+
+	dev_info(pce_dev->pdev, "_ce_out_dump: dst\n");
+	for (i = 0; i <= pce_dev->ce_out_dst_desc_index; i++) {
+		pdesc = pce_dev->ce_out_dst_desc + i;
+		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
+				pdesc->len);
+	}
+}
+
+#else
+
+static void _ce_in_dump(struct qce_device *pce_dev)
+{
+}
+
+static void _ce_out_dump(struct qce_device *pce_dev)
+{
+}
+
+#endif
+
+static int _chain_sg_buffer_out(struct qce_device *pce_dev,
+		struct scatterlist *sg, unsigned int nbytes)
+{
+	unsigned int len;
+	unsigned int dlen;
+	struct dmov_desc *pdesc;
+
+	pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
+	if (nbytes > 0x8000)
+		qce_split_and_insert_dm_desc(pdesc, nbytes, 0,
+				&pce_dev->ce_out_src_desc_index);
+	else
+		pdesc->len = nbytes;
+
+	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
+	/*
+	 * Two consecutive chunks may be handled by the same
+	 * buffer descriptor.
+	 */
+	while (nbytes > 0) {
+		len = min(nbytes, sg_dma_len(sg));
+		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+		nbytes -= len;
+		if (dlen == 0) {
+			pdesc->addr  = sg_dma_address(sg);
+			pdesc->len = len;
+			if (pdesc->len > 0x8000)
+				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
+					sg_dma_address(sg),
+					&pce_dev->ce_out_dst_desc_index);
+		} else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
+			pdesc->len  = dlen + len;
+			if (pdesc->len > 0x8000)
+				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
+					pdesc->addr,
+					&pce_dev->ce_out_dst_desc_index);
+
+		} else {
+			pce_dev->ce_out_dst_desc_index++;
+			if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
+				return -EIO;
+			pdesc++;
+			pdesc->len = len;
+			pdesc->addr = sg_dma_address(sg);
+			if (pdesc->len > 0x8000)
+				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
+					sg_dma_address(sg),
+					&pce_dev->ce_out_dst_desc_index);
+
+		}
+		if (nbytes > 0)
+			sg = sg_next(sg);
+	}
+	return 0;
+}
+
+static int _chain_pm_buffer_out(struct qce_device *pce_dev,
+		unsigned int pmem, unsigned int nbytes)
+{
+	unsigned int dlen;
+	struct dmov_desc *pdesc;
+
+	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
+	dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+
+	if (dlen == 0) {
+		pdesc->addr  = pmem;
+		pdesc->len = nbytes;
+	} else if (pmem == (pdesc->addr + dlen)) {
+		pdesc->len  = dlen + nbytes;
+	} else {
+		pce_dev->ce_out_dst_desc_index++;
+		if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
+			return -EIO;
+		pdesc++;
+		pdesc->len = nbytes;
+		pdesc->addr = pmem;
+	}
+	pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
+	pdesc->len += nbytes;
+
+	return 0;
+}
+
+static void _chain_buffer_out_init(struct qce_device *pce_dev)
+{
+	struct dmov_desc *pdesc;
+
+	pce_dev->ce_out_dst_desc_index = 0;
+	pce_dev->ce_out_src_desc_index = 0;
+	pdesc = pce_dev->ce_out_dst_desc;
+	pdesc->len = 0;
+}
+
+static void _ce_out_final(struct qce_device *pce_dev, unsigned total)
+{
+	struct dmov_desc *pdesc;
+	dmov_sg *pcmd;
+
+	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
+	pdesc->len |= ADM_DESC_LAST;
+	pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
+	pdesc->len |= ADM_DESC_LAST;
+	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
+	pcmd->cmd |= CMD_LC;
+}
+
+static void _aead_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+							result);
+		pce_dev->chan_ce_in_status = -1;
+	} else {
+		pce_dev->chan_ce_in_status = 0;
+	}
+
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_aead_complete(pce_dev);
+	}
+}
+
+static void _aead_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+							result);
+		pce_dev->chan_ce_out_status = -1;
+	} else {
+		pce_dev->chan_ce_out_status = 0;
+	}
+
+	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_aead_complete(pce_dev);
+	}
+
+}
+
+static void _sha_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_in_status = -1;
+	} else {
+		pce_dev->chan_ce_in_status = 0;
+	}
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+	_sha_complete(pce_dev);
+}
+
+static void _ablk_cipher_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_in_status = -1;
+	} else {
+		pce_dev->chan_ce_in_status = 0;
+	}
+
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_complete(pce_dev);
+	}
+}
+
+static void _ablk_cipher_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_out_status = -1;
+	} else {
+		pce_dev->chan_ce_out_status = 0;
+	}
+
+	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_complete(pce_dev);
+	}
+}
+
+
+static void _ablk_cipher_ce_in_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_in_status = -1;
+	} else {
+		pce_dev->chan_ce_in_status = 0;
+	}
+
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_use_pmem_complete(pce_dev);
+	}
+}
+
+static void _ablk_cipher_ce_out_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+						result);
+		pce_dev->chan_ce_out_status = -1;
+	} else {
+		pce_dev->chan_ce_out_status = 0;
+	}
+
+	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_use_pmem_complete(pce_dev);
+	}
+}
+
+static int _setup_cmd_template(struct qce_device *pce_dev)
+{
+	dmov_sg *pcmd;
+	struct dmov_desc *pdesc;
+	unsigned char *vaddr;
+	int i = 0;
+
+	/* Divide up the 4K coherent memory */
+
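+	/*
+	 * Every region below lives in the coherent memory allocated at
+	 * open time; each physical address is coh_pmem plus the region's
+	 * offset within that memory.
+	 */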
+	/* 1. ce_in channel 1st command src descriptors, 128 entries */
+	vaddr = pce_dev->coh_vmem;
+	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
+	pce_dev->ce_in_src_desc = (struct dmov_desc *) vaddr;
+	pce_dev->phy_ce_in_src_desc = pce_dev->coh_pmem +
+			 (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
+
+	/* 2. ce_in channel 1st command dst descriptors, up to 128 entries */
+	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
+	pce_dev->ce_in_dst_desc = (struct dmov_desc *) vaddr;
+	pce_dev->phy_ce_in_dst_desc = pce_dev->coh_pmem +
+			 (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
+
+	/* 3. ce_in channel command list of one scatter gather command */
+	pce_dev->cmd_list_ce_in = vaddr;
+	pce_dev->phy_cmd_list_ce_in = pce_dev->coh_pmem
+			 + (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(dmov_sg);
+
+	/* 4. authentication result. */
+	pce_dev->dig_result = vaddr;
+	pce_dev->phy_dig_result = pce_dev->coh_pmem +
+			(vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + SHA256_DIGESTSIZE;
+
+	/* 5. ce_out channel command list of one scatter gather command */
+	pce_dev->cmd_list_ce_out = vaddr;
+	pce_dev->phy_cmd_list_ce_out = pce_dev->coh_pmem
+			 + (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(dmov_sg);
+
+	/* 6. ce_out channel command src descriptors, up to 128 entries */
+	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
+	pce_dev->ce_out_src_desc = (struct dmov_desc *) vaddr;
+	pce_dev->phy_ce_out_src_desc = pce_dev->coh_pmem
+			 + (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
+
+	/* 7. ce_out channel command dst descriptors, 128 entries.  */
+	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
+	pce_dev->ce_out_dst_desc = (struct dmov_desc *) vaddr;
+	pce_dev->phy_ce_out_dst_desc = pce_dev->coh_pmem
+			 + (vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
+
+	/* 8. pad area. */
+	pce_dev->ce_pad = vaddr;
+	pce_dev->phy_ce_pad = pce_dev->coh_pmem +
+			(vaddr - pce_dev->coh_vmem);
+
+	/* pad area is twice the ADM block size to cover the AES-CCM worst case */
+	vaddr = vaddr + 2 * ADM_CE_BLOCK_SIZE;
+
+	/* 9. ce_in channel command pointer list.	 */
+	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 8);
+	pce_dev->cmd_pointer_list_ce_in = (unsigned int *) vaddr;
+	pce_dev->phy_cmd_pointer_list_ce_in = pce_dev->coh_pmem +
+			(vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(unsigned char *);
+
+	/* 10. ce_out channel command pointer list. */
+	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 8);
+	pce_dev->cmd_pointer_list_ce_out = (unsigned int *) vaddr;
+	pce_dev->phy_cmd_pointer_list_ce_out =  pce_dev->coh_pmem +
+			(vaddr - pce_dev->coh_vmem);
+	vaddr = vaddr + sizeof(unsigned char *);
+
+	/* 11. throw away area to store by-pass data from ce_out. */
+	pce_dev->ce_out_ignore = (unsigned char *) vaddr;
+	pce_dev->phy_ce_out_ignore  = pce_dev->coh_pmem
+			+ (vaddr - pce_dev->coh_vmem);
+	pce_dev->ce_out_ignore_size = PAGE_SIZE - (vaddr -
+			pce_dev->coh_vmem);  /* at least 1.5 K of space */
+	/*
+	 * The first command of command list ce_in is for the input of
+	 * concurrent operation of encrypt/decrypt or for the input
+	 * of authentication.
+	 */
+	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
+	/* swap byte and half word , dst crci ,  scatter gather */
+	pcmd->cmd = CMD_DST_SWAP_BYTES | CMD_DST_SWAP_SHORTS |
+			CMD_DST_CRCI(pce_dev->crci_in) | CMD_MODE_SG;
+	pdesc = pce_dev->ce_in_src_desc;
+	pdesc->addr = 0;	/* to be filled in each operation */
+	pdesc->len = 0;		/* to be filled in each operation */
+	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_in_src_desc;
+
+	pdesc = pce_dev->ce_in_dst_desc;
+	for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
+		pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
+		pdesc->len = 0; /* to be filled in each operation */
+		pdesc++;
+	}
+	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_in_dst_desc;
+	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
+						DST_INDEX_SG_CMD(0);
+	pcmd++;
+
+	/* setup command pointer list */
+	*(pce_dev->cmd_pointer_list_ce_in) = (CMD_PTR_LP | DMOV_CMD_LIST |
+			DMOV_CMD_ADDR((unsigned int)
+					pce_dev->phy_cmd_list_ce_in));
+	pce_dev->chan_ce_in_cmd->user = (void *) pce_dev;
+	pce_dev->chan_ce_in_cmd->exec_func = NULL;
+	pce_dev->chan_ce_in_cmd->cmdptr = DMOV_CMD_ADDR(
+			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_in);
+	pce_dev->chan_ce_in_cmd->crci_mask = msm_dmov_build_crci_mask(1,
+			pce_dev->crci_in);
+
+
+	/*
+	 * The first command in the command list ce_out.
+	 * It is for the encrypt/decrypt output.
+	 * If hashing only, ce_out is not used.
+	 */
+	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
+	/* swap byte, half word, source crci, scatter gather */
+	pcmd->cmd =   CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
+			CMD_SRC_CRCI(pce_dev->crci_out) | CMD_MODE_SG;
+
+	pdesc = pce_dev->ce_out_src_desc;
+	for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
+		pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
+		pdesc->len = 0;  /* to be filled in each operation */
+		pdesc++;
+	}
+	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_out_src_desc;
+
+	pdesc = pce_dev->ce_out_dst_desc;
+	pdesc->addr = 0;  /* to be filled in each operation */
+	pdesc->len = 0;   /* to be filled in each operation */
+	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_out_dst_desc;
+	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
+						DST_INDEX_SG_CMD(0);
+
+	pcmd++;
+
+	/* setup command pointer list */
+	*(pce_dev->cmd_pointer_list_ce_out) = (CMD_PTR_LP | DMOV_CMD_LIST |
+			DMOV_CMD_ADDR((unsigned int)
+					pce_dev->phy_cmd_list_ce_out));
+
+	pce_dev->chan_ce_out_cmd->user = pce_dev;
+	pce_dev->chan_ce_out_cmd->exec_func = NULL;
+	pce_dev->chan_ce_out_cmd->cmdptr = DMOV_CMD_ADDR(
+			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_out);
+	pce_dev->chan_ce_out_cmd->crci_mask = msm_dmov_build_crci_mask(1,
+			pce_dev->crci_out);
+
+	return 0;
+}
+
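+/*
+ * Prime the channel states before enqueueing: a channel that is not used
+ * for this request is marked COMP immediately, so the completion
+ * rendezvous in the callbacks fires as soon as the active channel(s)
+ * finish.
+ */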
+static int _qce_start_dma(struct qce_device *pce_dev, bool ce_in, bool ce_out)
+{
+	if (ce_in)
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IN_PROG;
+	else
+		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
+
+	if (ce_out)
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IN_PROG;
+	else
+		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
+
+	if (ce_in)
+		msm_dmov_enqueue_cmd(pce_dev->chan_ce_in,
+					pce_dev->chan_ce_in_cmd);
+	if (ce_out)
+		msm_dmov_enqueue_cmd(pce_dev->chan_ce_out,
+					pce_dev->chan_ce_out_cmd);
+
+	return 0;
+}
+
+int qce_aead_req(void *handle, struct qce_req *q_req)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct aead_request *areq = (struct aead_request *) q_req->areq;
+	uint32_t authsize = q_req->authsize;
+	uint32_t totallen_in, totallen_out, out_len;
+	uint32_t pad_len_in, pad_len_out;
+	uint32_t pad_mac_len_out, pad_ptx_len_out;
+	int rc = 0;
+
+	if (q_req->dir == QCE_ENCRYPT) {
+		q_req->cryptlen = areq->cryptlen;
+		totallen_in = q_req->cryptlen + areq->assoclen;
+		totallen_out = q_req->cryptlen + authsize + areq->assoclen;
+		out_len = areq->cryptlen + authsize;
+		pad_len_in = ALIGN(totallen_in, ADM_CE_BLOCK_SIZE) -
+								totallen_in;
+		pad_mac_len_out = ALIGN(authsize, ADM_CE_BLOCK_SIZE) -
+								authsize;
+		pad_ptx_len_out = ALIGN(q_req->cryptlen, ADM_CE_BLOCK_SIZE) -
+							q_req->cryptlen;
+		pad_len_out = pad_ptx_len_out + pad_mac_len_out;
+		totallen_out += pad_len_out;
+	} else {
+		q_req->cryptlen = areq->cryptlen - authsize;
+		totallen_in = areq->cryptlen + areq->assoclen;
+		totallen_out = q_req->cryptlen + areq->assoclen;
+		out_len = areq->cryptlen - authsize;
+		pad_len_in = ALIGN(areq->cryptlen, ADM_CE_BLOCK_SIZE) -
+							areq->cryptlen;
+		pad_len_out = pad_len_in + authsize;
+		totallen_out += pad_len_out;
+	}
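+	/*
+	 * Worked example (illustrative; assumes ADM_CE_BLOCK_SIZE == 16):
+	 * encrypting cryptlen = 30 with assoclen = 16 and authsize = 8
+	 * gives totallen_in = 46, so pad_len_in = 48 - 46 = 2;
+	 * pad_ptx_len_out = 32 - 30 = 2 and pad_mac_len_out = 16 - 8 = 8,
+	 * so pad_len_out = 10 and totallen_out = 54 + 10 = 64.
+	 */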
+
+	_chain_buffer_in_init(pce_dev);
+	_chain_buffer_out_init(pce_dev);
+
+	pce_dev->assoc_nents = 0;
+	pce_dev->src_nents = 0;
+	pce_dev->dst_nents = 0;
+	pce_dev->ivsize = q_req->ivsize;
+	pce_dev->authsize = q_req->authsize;
+
+	/* associated data input */
+	pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
+	dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+					 DMA_TO_DEVICE);
+	if (_chain_sg_buffer_in(pce_dev, areq->assoc, areq->assoclen) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+	/* cipher input */
+	pce_dev->src_nents = count_sg(areq->src, areq->cryptlen);
+	dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	if (_chain_sg_buffer_in(pce_dev, areq->src, areq->cryptlen) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+	/* pad data in */
+	if (pad_len_in) {
+		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+						pad_len_in) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+	}
+
+	/* ignore associated data */
+	if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_out_ignore,
+				areq->assoclen) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+	/* cipher + mac output  for encryption    */
+	if (areq->src != areq->dst) {
+		pce_dev->dst_nents = count_sg(areq->dst, out_len);
+		dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+				DMA_FROM_DEVICE);
+	}
+	if (_chain_sg_buffer_out(pce_dev, areq->dst, out_len) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+	/* pad data out */
+	if (pad_len_out) {
+		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
+						pad_len_out) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+	}
+
+	/* finalize the ce_in and ce_out channels command lists */
+	_ce_in_final(pce_dev, ALIGN(totallen_in, ADM_CE_BLOCK_SIZE));
+	_ce_out_final(pce_dev, ALIGN(totallen_out, ADM_CE_BLOCK_SIZE));
+
+	/* set up crypto device */
+	rc = _ce_setup_cipher(pce_dev, q_req, totallen_in, areq->assoclen);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to adm */
+	pce_dev->areq = q_req->areq;
+	pce_dev->qce_cb = q_req->qce_cb;
+
+	pce_dev->chan_ce_in_cmd->complete_func = _aead_ce_in_call_back;
+	pce_dev->chan_ce_out_cmd->complete_func = _aead_ce_out_call_back;
+
+	_ce_in_dump(pce_dev);
+	_ce_out_dump(pce_dev);
+
+	rc = _qce_start_dma(pce_dev, true, true);
+	if (rc == 0)
+		return 0;
+bad:
+	if (pce_dev->assoc_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+				DMA_TO_DEVICE);
+	}
+
+	if (pce_dev->src_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+								DMA_TO_DEVICE);
+	}
+	if (pce_dev->dst_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+				DMA_FROM_DEVICE);
+	}
+	return rc;
+}
+EXPORT_SYMBOL(qce_aead_req);
+
+int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
+{
+	int rc = 0;
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct ablkcipher_request *areq = (struct ablkcipher_request *)
+						c_req->areq;
+
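+	/*
+	 * Transfers to and from the CE must be a whole number of ADM
+	 * blocks; any shortfall is padded from (and discarded to) the
+	 * scratch pad area carved out of the coherent region.
+	 */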
+	uint32_t pad_len = ALIGN(areq->nbytes, ADM_CE_BLOCK_SIZE)
+						- areq->nbytes;
+
+	_chain_buffer_in_init(pce_dev);
+	_chain_buffer_out_init(pce_dev);
+
+	pce_dev->src_nents = 0;
+	pce_dev->dst_nents = 0;
+
+	/* cipher input */
+	pce_dev->src_nents = count_sg(areq->src, areq->nbytes);
+
+	if (c_req->use_pmem != 1)
+		dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+								DMA_TO_DEVICE);
+	else
+		dma_map_pmem_sg(&c_req->pmem->src[0], pce_dev->src_nents,
+								areq->src);
+
+	if (_chain_sg_buffer_in(pce_dev, areq->src, areq->nbytes) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	/* cipher output */
+	if (areq->src != areq->dst) {
+		pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
+		if (c_req->use_pmem != 1)
+			dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+							DMA_FROM_DEVICE);
+		else
+			dma_map_pmem_sg(&c_req->pmem->dst[0],
+					pce_dev->dst_nents, areq->dst);
+	}
+	if (_chain_sg_buffer_out(pce_dev, areq->dst, areq->nbytes) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	/* pad data */
+	if (pad_len) {
+		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+	}
+
+	/* finalize the ce_in and ce_out channels command lists */
+	_ce_in_final(pce_dev, areq->nbytes + pad_len);
+	_ce_out_final(pce_dev, areq->nbytes + pad_len);
+
+	_ce_in_dump(pce_dev);
+	_ce_out_dump(pce_dev);
+
+	/* set up crypto device */
+	rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to adm */
+	pce_dev->areq = areq;
+	pce_dev->qce_cb = c_req->qce_cb;
+	if (c_req->use_pmem == 1) {
+		pce_dev->chan_ce_in_cmd->complete_func =
+					_ablk_cipher_ce_in_call_back_pmem;
+		pce_dev->chan_ce_out_cmd->complete_func =
+					_ablk_cipher_ce_out_call_back_pmem;
+	} else {
+		pce_dev->chan_ce_in_cmd->complete_func =
+					_ablk_cipher_ce_in_call_back;
+		pce_dev->chan_ce_out_cmd->complete_func =
+					_ablk_cipher_ce_out_call_back;
+	}
+	rc = _qce_start_dma(pce_dev, true, true);
+
+	if (rc == 0)
+		return 0;
+bad:
+	if (c_req->use_pmem != 1) {
+		if (pce_dev->dst_nents) {
+			dma_unmap_sg(pce_dev->pdev, areq->dst,
+					pce_dev->dst_nents, DMA_FROM_DEVICE);
+		}
+		if (pce_dev->src_nents) {
+			dma_unmap_sg(pce_dev->pdev, areq->src,
+					pce_dev->src_nents,
+					(areq->src == areq->dst) ?
+						DMA_BIDIRECTIONAL :
+						DMA_TO_DEVICE);
+		}
+	}
+	return rc;
+}
+EXPORT_SYMBOL(qce_ablk_cipher_req);
+
+int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc;
+	uint32_t pad_len = ALIGN(sreq->size, ADM_CE_BLOCK_SIZE) - sreq->size;
+	struct ahash_request *areq = (struct ahash_request *)sreq->areq;
+
+	_chain_buffer_in_init(pce_dev);
+	pce_dev->src_nents = count_sg(sreq->src, sreq->size);
+	dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents,
+							DMA_TO_DEVICE);
+
+	if (_chain_sg_buffer_in(pce_dev, sreq->src, sreq->size) < 0) {
+		rc = -ENOMEM;
+		goto bad;
+	}
+
+	if (pad_len) {
+		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
+						pad_len) < 0) {
+			rc = -ENOMEM;
+			goto bad;
+		}
+	}
+	_ce_in_final(pce_dev, sreq->size + pad_len);
+
+	_ce_in_dump(pce_dev);
+
+	rc = _ce_setup_hash(pce_dev, sreq);
+
+	if (rc < 0)
+		goto bad;
+
+	pce_dev->areq = areq;
+	pce_dev->qce_cb = sreq->qce_cb;
+	pce_dev->chan_ce_in_cmd->complete_func = _sha_ce_in_call_back;
+
+	rc = _qce_start_dma(pce_dev, true, false);
+
+	if (rc == 0)
+		return 0;
+bad:
+	if (pce_dev->src_nents) {
+		dma_unmap_sg(pce_dev->pdev, sreq->src,
+				pce_dev->src_nents, DMA_TO_DEVICE);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(qce_process_sha_req);
+
+/* crypto engine open function. */
+void *qce_open(struct platform_device *pdev, int *rc)
+{
+	struct qce_device *pce_dev;
+	struct resource *resource;
+	struct clk *ce_core_clk;
+	struct clk *ce_clk;
+
+	pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
+	if (!pce_dev) {
+		*rc = -ENOMEM;
+		dev_err(&pdev->dev, "Cannot allocate memory\n");
+		return NULL;
+	}
+	pce_dev->pdev = &pdev->dev;
+
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!resource) {
+		*rc = -ENXIO;
+		dev_err(pce_dev->pdev, "Missing MEM resource\n");
+		goto err_pce_dev;
+	}
+	pce_dev->phy_iobase = resource->start;
+	pce_dev->iobase = ioremap_nocache(resource->start,
+				resource_size(resource));
+	if (!pce_dev->iobase) {
+		*rc = -ENOMEM;
+		dev_err(pce_dev->pdev, "Cannot map IO memory\n");
+		goto err_pce_dev;
+	}
+
+	pce_dev->chan_ce_in_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
+			GFP_KERNEL);
+	pce_dev->chan_ce_out_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
+			GFP_KERNEL);
+	if (pce_dev->chan_ce_in_cmd == NULL ||
+			pce_dev->chan_ce_out_cmd == NULL) {
+		dev_err(pce_dev->pdev, "Cannot allocate memory\n");
+		*rc = -ENOMEM;
+		goto err_dm_chan_cmd;
+	}
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+					"crypto_channels");
+	if (!resource) {
+		*rc = -ENXIO;
+		dev_err(pce_dev->pdev, "Missing DMA channel resource\n");
+		goto err_dm_chan_cmd;
+	}
+	pce_dev->chan_ce_in = resource->start;
+	pce_dev->chan_ce_out = resource->end;
+	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+				"crypto_crci_in");
+	if (!resource) {
+		*rc = -ENXIO;
+		dev_err(pce_dev->pdev, "Missing DMA crci in resource\n");
+		goto err_dm_chan_cmd;
+	}
+	pce_dev->crci_in = resource->start;
+	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+				"crypto_crci_out");
+	if (!resource) {
+		*rc = -ENXIO;
+		dev_err(pce_dev->pdev, "Missing DMA crci out resource\n");
+		goto err_dm_chan_cmd;
+	}
+	pce_dev->crci_out = resource->start;
+
+	pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
+			2*PAGE_SIZE, &pce_dev->coh_pmem, GFP_KERNEL);
+
+	if (pce_dev->coh_vmem == NULL) {
+		*rc = -ENOMEM;
+		dev_err(pce_dev->pdev, "Cannot allocate coherent memory\n");
+		goto err;
+	}
+
+	/* Get CE core clk */
+	ce_core_clk = clk_get(pce_dev->pdev, "ce_clk");
+	if (IS_ERR(ce_core_clk)) {
+		*rc = PTR_ERR(ce_core_clk);
+		goto err;
+	}
+	pce_dev->ce_core_clk = ce_core_clk;
+	/* Get CE clk */
+	ce_clk = clk_get(pce_dev->pdev, "ce_pclk");
+	if (IS_ERR(ce_clk)) {
+		*rc = PTR_ERR(ce_clk);
+		clk_put(pce_dev->ce_core_clk);
+		goto err;
+	}
+	pce_dev->ce_clk = ce_clk;
+
+	/* Enable CE core clk */
+	*rc = clk_enable(pce_dev->ce_core_clk);
+	if (*rc) {
+		clk_put(pce_dev->ce_core_clk);
+		clk_put(pce_dev->ce_clk);
+		goto err;
+	}
+
+	/* Enable CE clk */
+	*rc = clk_enable(pce_dev->ce_clk);
+	if (*rc) {
+		clk_disable(pce_dev->ce_core_clk);
+		clk_put(pce_dev->ce_core_clk);
+		clk_put(pce_dev->ce_clk);
+		goto err;
+	}
+	_setup_cmd_template(pce_dev);
+
+	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
+
+	if (_init_ce_engine(pce_dev)) {
+		*rc = -ENXIO;
+		goto err;
+	}
+	*rc = 0;
+	return pce_dev;
+
+err:
+	if (pce_dev->coh_vmem)
+		dma_free_coherent(pce_dev->pdev, 2*PAGE_SIZE,
+				pce_dev->coh_vmem, pce_dev->coh_pmem);
+err_dm_chan_cmd:
+	kfree(pce_dev->chan_ce_in_cmd);
+	kfree(pce_dev->chan_ce_out_cmd);
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+
+err_pce_dev:
+	kfree(pce_dev);
+
+	return NULL;
+}
+EXPORT_SYMBOL(qce_open);
+
+/* crypto engine close function. */
+int qce_close(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	if (handle == NULL)
+		return -ENODEV;
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+
+	if (pce_dev->coh_vmem)
+		dma_free_coherent(pce_dev->pdev, 2*PAGE_SIZE, pce_dev->coh_vmem,
+						pce_dev->coh_pmem);
+	clk_disable(pce_dev->ce_clk);
+	clk_disable(pce_dev->ce_core_clk);
+
+	clk_put(pce_dev->ce_clk);
+	clk_put(pce_dev->ce_core_clk);
+
+	kfree(pce_dev->chan_ce_in_cmd);
+	kfree(pce_dev->chan_ce_out_cmd);
+	kfree(handle);
+
+	return 0;
+}
+EXPORT_SYMBOL(qce_close);
+
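+/*
+ * Report the fixed capability set of this CE revision. Typical client
+ * usage (a sketch; register_ccm_algos() is a hypothetical caller-side
+ * helper, not part of this driver):
+ *
+ *	struct ce_hw_support ce_support;
+ *
+ *	if (qce_hw_support(handle, &ce_support) == 0 &&
+ *			ce_support.aes_ccm)
+ *		register_ccm_algos();
+ */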
+int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
+{
+	if (ce_support == NULL)
+		return -EINVAL;
+
+	ce_support->sha1_hmac_20 = false;
+	ce_support->sha1_hmac = false;
+	ce_support->sha256_hmac = false;
+	ce_support->sha_hmac = false;
+	ce_support->cmac  = true;
+	ce_support->aes_key_192 = false;
+	ce_support->aes_xts = true;
+	ce_support->aes_ccm = true;
+	ce_support->ota = false;
+	return 0;
+}
+EXPORT_SYMBOL(qce_hw_support);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
+MODULE_DESCRIPTION("Crypto Engine driver");
+MODULE_VERSION("2.04");
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
new file mode 100644
index 0000000..18eff22
--- /dev/null
+++ b/drivers/crypto/msm/qcedev.c
@@ -0,0 +1,2095 @@
+/* Qualcomm CE device driver.
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/mman.h>
+#include <linux/android_pmem.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include <mach/scm.h>
+#include "inc/qcedev.h"
+#include "inc/qce.h"
+
+
+#define CACHE_LINE_SIZE 32
+#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
+/* standard initialization vector for SHA-1, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha1_uint8[] = {
+	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
+	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
+	0xC3, 0xD2, 0xE1, 0xF0
+};
+/* standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha256_uint8[] = {
+	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
+	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
+	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
+	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
+};
+
+enum qcedev_crypto_oper_type {
+	QCEDEV_CRYPTO_OPER_CIPHER	= 0,
+	QCEDEV_CRYPTO_OPER_SHA		= 1,
+	QCEDEV_CRYPTO_OPER_LAST
+};
+
+struct qcedev_control;
+
+struct qcedev_cipher_req {
+	struct ablkcipher_request creq;
+	void *cookie;
+};
+
+struct qcedev_sha_req {
+	struct ahash_request sreq;
+	struct qcedev_sha_ctxt *sha_ctxt;
+	void *cookie;
+};
+
+struct qcedev_async_req {
+	struct list_head			list;
+	struct completion			complete;
+	enum qcedev_crypto_oper_type		op_type;
+	union {
+		struct qcedev_cipher_op_req	cipher_op_req;
+		struct qcedev_sha_op_req	sha_op_req;
+	};
+	union {
+		struct qcedev_cipher_req	cipher_req;
+		struct qcedev_sha_req		sha_req;
+	};
+	struct qcedev_control			*podev;
+	int					err;
+};
+
+/*
+ * Register ourselves as a misc device to be able to access the dev driver
+ * from userspace.
+ */
+
+
+#define QCEDEV_DEV	"qcedev"
+
+struct qcedev_control {
+
+	/* CE features supported by platform */
+	struct msm_ce_hw_support platform_support;
+
+	bool ce_locked;
+
+	/* CE features/algorithms supported by HW engine*/
+	struct ce_hw_support ce_support;
+
+	/* misc device */
+	struct miscdevice miscdevice;
+
+	/* qce handle */
+	void *qce;
+
+	/* platform device */
+	struct platform_device *pdev;
+
+	unsigned magic;
+
+	struct list_head ready_commands;
+	struct qcedev_async_req *active_command;
+	spinlock_t lock;
+	struct tasklet_struct done_tasklet;
+};
+
+/*-------------------------------------------------------------------------
+* Resource Locking Service
+* ------------------------------------------------------------------------*/
+#define QCEDEV_CMD_ID				1
+#define QCEDEV_CE_LOCK_CMD			1
+#define QCEDEV_CE_UNLOCK_CMD			0
+#define NUM_RETRY				1000
+#define CE_BUSY					55
+
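+/*
+ * On targets where the CE block is shared with the secure world, the
+ * lock/unlock commands below are issued through scm_call() so that only
+ * one environment drives the engine at a time; a busy response is
+ * retried up to NUM_RETRY times before giving up.
+ */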
+static int qcedev_scm_cmd(int resource, int cmd, int *response)
+{
+#ifdef CONFIG_MSM_SCM
+
+	struct {
+		int resource;
+		int cmd;
+	} cmd_buf;
+
+	cmd_buf.resource = resource;
+	cmd_buf.cmd = cmd;
+
+	return scm_call(SCM_SVC_TZ, QCEDEV_CMD_ID, &cmd_buf,
+		sizeof(cmd_buf), response, sizeof(*response));
+
+#else
+	return 0;
+#endif
+}
+
+static int qcedev_unlock_ce(struct qcedev_control *podev)
+{
+	if ((podev->platform_support.ce_shared) && (podev->ce_locked == true)) {
+		int response = 0;
+
+		if (qcedev_scm_cmd(podev->platform_support.shared_ce_resource,
+					QCEDEV_CE_UNLOCK_CMD, &response)) {
+			printk(KERN_ERR "%s Failed to release CE lock\n",
+				__func__);
+			return -EUSERS;
+		}
+		podev->ce_locked = false;
+	}
+	return 0;
+}
+
+static int qcedev_lock_ce(struct qcedev_control *podev)
+{
+	if ((podev->platform_support.ce_shared) &&
+					(podev->ce_locked == false)) {
+		int response = -CE_BUSY;
+		int i = 0;
+
+		do {
+			if (qcedev_scm_cmd(
+				podev->platform_support.shared_ce_resource,
+				QCEDEV_CE_LOCK_CMD, &response)) {
+				response = -EINVAL;
+				break;
+			}
+		} while ((response == -CE_BUSY) && (i++ < NUM_RETRY));
+
+		if ((response == -CE_BUSY) && (i >= NUM_RETRY))
+			return -EUSERS;
+		if (response < 0)
+			return -EINVAL;
+
+		podev->ce_locked = true;
+	}
+
+	return 0;
+}
+
+#define QCEDEV_MAGIC 0x56434544 /* "qced" */
+
+static long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
+static int qcedev_open(struct inode *inode, struct file *file);
+static int qcedev_release(struct inode *inode, struct file *file);
+static int start_cipher_req(struct qcedev_control *podev);
+static int start_sha_req(struct qcedev_control *podev,
+			struct qcedev_sha_op_req *sha_op_req);
+
+static const struct file_operations qcedev_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qcedev_ioctl,
+	.open = qcedev_open,
+	.release = qcedev_release,
+};
+
+static struct qcedev_control qce_dev[] = {
+	{
+		.miscdevice = {
+			.minor = MISC_DYNAMIC_MINOR,
+			.name = "qce",
+			.fops = &qcedev_fops,
+		},
+		.magic = QCEDEV_MAGIC,
+	},
+};
+
+
+#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
+#define DEBUG_MAX_FNAME  16
+#define DEBUG_MAX_RW_BUF 1024
+
+struct qcedev_stat {
+	u32 qcedev_dec_success;
+	u32 qcedev_dec_fail;
+	u32 qcedev_enc_success;
+	u32 qcedev_enc_fail;
+	u32 qcedev_sha_success;
+	u32 qcedev_sha_fail;
+};
+
+static struct qcedev_stat _qcedev_stat[MAX_QCE_DEVICE];
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static int _debug_qcedev[MAX_QCE_DEVICE];
+
+static struct qcedev_control *qcedev_minor_to_control(unsigned n)
+{
+	int i;
+
+	for (i = 0; i < MAX_QCE_DEVICE; i++) {
+		if (qce_dev[i].miscdevice.minor == n)
+			return &qce_dev[i];
+	}
+	return NULL;
+}
+
+static int qcedev_open(struct inode *inode, struct file *file)
+{
+	struct qcedev_control *podev;
+
+	podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
+	if (podev == NULL) {
+		printk(KERN_ERR "%s: no such device %d\n", __func__,
+				MINOR(inode->i_rdev));
+		return -ENOENT;
+	}
+
+	file->private_data = podev;
+
+	return 0;
+}
+
+static int qcedev_release(struct inode *inode, struct file *file)
+{
+	struct qcedev_control *podev;
+
+	podev =  file->private_data;
+
+	if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
+		printk(KERN_ERR "%s: invalid handle %p\n",
+			__func__, podev);
+	}
+
+	file->private_data = NULL;
+
+	return 0;
+}
+
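+/*
+ * Tasklet body: completes the request that just finished and, if more
+ * requests sit on ready_commands, starts the next one; a request that
+ * fails to start is completed immediately and the queue is drained
+ * again.
+ */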
+static void req_done(unsigned long data)
+{
+	struct qcedev_control *podev = (struct qcedev_control *)data;
+	struct qcedev_async_req *areq;
+	unsigned long flags = 0;
+	struct qcedev_async_req *new_req = NULL;
+	int ret = 0;
+
+	spin_lock_irqsave(&podev->lock, flags);
+	areq = podev->active_command;
+	podev->active_command = NULL;
+
+again:
+	if (!list_empty(&podev->ready_commands)) {
+		new_req = container_of(podev->ready_commands.next,
+						struct qcedev_async_req, list);
+		list_del(&new_req->list);
+		podev->active_command = new_req;
+		new_req->err = 0;
+		if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
+			ret = start_cipher_req(podev);
+		else
+			ret = start_sha_req(podev, &new_req->sha_op_req);
+	}
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	if (areq)
+		complete(&areq->complete);
+
+	if (new_req && ret) {
+		complete(&new_req->complete);
+		spin_lock_irqsave(&podev->lock, flags);
+		podev->active_command = NULL;
+		areq = NULL;
+		ret = 0;
+		new_req = NULL;
+		goto again;
+	}
+
+	return;
+}
+
+static void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
+	unsigned char *authdata, int ret)
+{
+	struct qcedev_sha_req *areq;
+	struct qcedev_control *pdev;
+	uint32_t *auth32 = (uint32_t *)authdata;
+
+	areq = (struct qcedev_sha_req *) cookie;
+	pdev = (struct qcedev_control *) areq->cookie;
+
+	if (digest)
+		memcpy(&areq->sha_ctxt->digest[0], digest, 32);
+
+	if (authdata) {
+		areq->sha_ctxt->auth_data[0] = auth32[0];
+		areq->sha_ctxt->auth_data[1] = auth32[1];
+		areq->sha_ctxt->auth_data[2] = auth32[2];
+		areq->sha_ctxt->auth_data[3] = auth32[3];
+	}
+
+	tasklet_schedule(&pdev->done_tasklet);
+}
+
+
+static void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
+	unsigned char *iv, int ret)
+{
+	struct qcedev_cipher_req *areq;
+	struct qcedev_control *pdev;
+	struct qcedev_async_req *qcedev_areq;
+
+	areq = (struct qcedev_cipher_req *) cookie;
+	pdev = (struct qcedev_control *) areq->cookie;
+	qcedev_areq = pdev->active_command;
+
+	if (iv)
+		memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
+					qcedev_areq->cipher_op_req.ivlen);
+	tasklet_schedule(&pdev->done_tasklet);
+}
+
+static int start_cipher_req(struct qcedev_control *podev)
+{
+	struct qcedev_async_req *qcedev_areq;
+	struct qce_req creq;
+	int ret = 0;
+
+	/* start the command on the podev->active_command */
+	qcedev_areq = podev->active_command;
+	qcedev_areq->podev = podev;
+
+	qcedev_areq->cipher_req.cookie = qcedev_areq->podev;
+	creq.use_pmem = qcedev_areq->cipher_op_req.use_pmem;
+	if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM)
+		creq.pmem = &qcedev_areq->cipher_op_req.pmem;
+	else
+		creq.pmem = NULL;
+
+	switch (qcedev_areq->cipher_op_req.alg) {
+	case QCEDEV_ALG_DES:
+		creq.alg = CIPHER_ALG_DES;
+		break;
+	case QCEDEV_ALG_3DES:
+		creq.alg = CIPHER_ALG_3DES;
+		break;
+	case QCEDEV_ALG_AES:
+		creq.alg = CIPHER_ALG_AES;
+		break;
+	default:
+		break;
+	}
+
+	switch (qcedev_areq->cipher_op_req.mode) {
+	case QCEDEV_AES_MODE_CBC:
+	case QCEDEV_DES_MODE_CBC:
+		creq.mode = QCE_MODE_CBC;
+		break;
+	case QCEDEV_AES_MODE_ECB:
+	case QCEDEV_DES_MODE_ECB:
+		creq.mode = QCE_MODE_ECB;
+		break;
+	case QCEDEV_AES_MODE_CTR:
+		creq.mode = QCE_MODE_CTR;
+		break;
+	case QCEDEV_AES_MODE_XTS:
+		creq.mode = QCE_MODE_XTS;
+		break;
+	default:
+		break;
+	}
+
+	if ((creq.alg == CIPHER_ALG_AES) &&
+		(creq.mode == QCE_MODE_CTR)) {
+		creq.dir = QCE_ENCRYPT;
+	} else {
+		if (qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC)
+			creq.dir = QCE_ENCRYPT;
+		else
+			creq.dir = QCE_DECRYPT;
+	}
+
+	creq.iv = &qcedev_areq->cipher_op_req.iv[0];
+	creq.ivsize = qcedev_areq->cipher_op_req.ivlen;
+
+	creq.enckey =  &qcedev_areq->cipher_op_req.enckey[0];
+	creq.encklen = qcedev_areq->cipher_op_req.encklen;
+
+	creq.cryptlen = qcedev_areq->cipher_op_req.data_len;
+
+	if (qcedev_areq->cipher_op_req.encklen == 0) {
+		if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
+			|| (qcedev_areq->cipher_op_req.op ==
+				QCEDEV_OPER_DEC_NO_KEY))
+			creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
+		else {
+			int i;
+
+			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+				if (qcedev_areq->cipher_op_req.enckey[i] != 0)
+					break;
+			}
+
+			if ((podev->platform_support.hw_key_support == 1) &&
+						(i == QCEDEV_MAX_KEY_SIZE))
+				creq.op = QCE_REQ_ABLK_CIPHER;
+			else {
+				ret = -EINVAL;
+				goto unsupported;
+			}
+		}
+	} else {
+		creq.op = QCE_REQ_ABLK_CIPHER;
+	}
+
+	creq.qce_cb = qcedev_cipher_req_cb;
+	creq.areq = (void *)&qcedev_areq->cipher_req;
+
+	ret = qce_ablk_cipher_req(podev->qce, &creq);
+unsupported:
+	if (ret)
+		qcedev_areq->err = -ENXIO;
+	else
+		qcedev_areq->err = 0;
+	return ret;
+}
+
+static int start_sha_req(struct qcedev_control *podev,
+			struct qcedev_sha_op_req *sha_op_req)
+{
+	struct qcedev_async_req *qcedev_areq;
+	struct qce_sha_req sreq;
+	int ret = 0;
+
+	/* start the command on the podev->active_command */
+	qcedev_areq = podev->active_command;
+	qcedev_areq->podev = podev;
+
+	switch (qcedev_areq->sha_op_req.alg) {
+	case QCEDEV_ALG_SHA1:
+		sreq.alg = QCE_HASH_SHA1;
+		break;
+	case QCEDEV_ALG_SHA256:
+		sreq.alg = QCE_HASH_SHA256;
+		break;
+	case QCEDEV_ALG_SHA1_HMAC:
+		if (podev->ce_support.sha_hmac) {
+			sreq.alg = QCE_HASH_SHA1_HMAC;
+			sreq.authkey =
+				&qcedev_areq->sha_op_req.ctxt.authkey[0];
+
+		} else {
+			sreq.alg = QCE_HASH_SHA1;
+			sreq.authkey = NULL;
+		}
+		break;
+	case QCEDEV_ALG_SHA256_HMAC:
+		if (podev->ce_support.sha_hmac) {
+			sreq.alg = QCE_HASH_SHA256_HMAC;
+			sreq.authkey =
+				&qcedev_areq->sha_op_req.ctxt.authkey[0];
+
+		} else {
+			sreq.alg = QCE_HASH_SHA256;
+			sreq.authkey = NULL;
+		}
+		break;
+	case QCEDEV_ALG_AES_CMAC:
+		sreq.alg = QCE_HASH_AES_CMAC;
+		sreq.authkey = &qcedev_areq->sha_op_req.ctxt.authkey[0];
+		sreq.authklen = qcedev_areq->sha_op_req.authklen;
+		break;
+	default:
+		break;
+	}
+
+	qcedev_areq->sha_req.cookie = podev;
+
+	sreq.qce_cb = qcedev_sha_req_cb;
+	if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
+		sreq.auth_data[0] = sha_op_req->ctxt.auth_data[0];
+		sreq.auth_data[1] = sha_op_req->ctxt.auth_data[1];
+		sreq.auth_data[2] = sha_op_req->ctxt.auth_data[2];
+		sreq.auth_data[3] = sha_op_req->ctxt.auth_data[3];
+		sreq.digest = &sha_op_req->ctxt.digest[0];
+		sreq.first_blk = sha_op_req->ctxt.first_blk;
+		sreq.last_blk = sha_op_req->ctxt.last_blk;
+	}
+	sreq.size = qcedev_areq->sha_req.sreq.nbytes;
+	sreq.src = qcedev_areq->sha_req.sreq.src;
+	sreq.areq = (void *)&qcedev_areq->sha_req;
+	qcedev_areq->sha_req.sha_ctxt =
+		(struct qcedev_sha_ctxt *)(&sha_op_req->ctxt);
+
+	ret = qce_process_sha_req(podev->qce, &sreq);
+
+	if (ret)
+		qcedev_areq->err = -ENXIO;
+	else
+		qcedev_areq->err = 0;
+	return ret;
+}
+
+static int submit_req(struct qcedev_async_req *qcedev_areq,
+					struct qcedev_control *podev)
+{
+	unsigned long flags = 0;
+	int ret = 0;
+	struct qcedev_stat *pstat;
+
+	qcedev_areq->err = 0;
+
+	ret = qcedev_lock_ce(podev);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&podev->lock, flags);
+
+	if (podev->active_command == NULL) {
+		podev->active_command = qcedev_areq;
+		if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
+			ret = start_cipher_req(podev);
+		else
+			ret = start_sha_req(podev, &qcedev_areq->sha_op_req);
+	} else {
+		list_add_tail(&qcedev_areq->list, &podev->ready_commands);
+	}
+
+	if (ret != 0)
+		podev->active_command = NULL;
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	if (ret == 0)
+		wait_for_completion(&qcedev_areq->complete);
+
+	ret = qcedev_unlock_ce(podev);
+	if (ret)
+		qcedev_areq->err = -EIO;
+
+	pstat = &_qcedev_stat[podev->pdev->id];
+	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
+		switch (qcedev_areq->cipher_op_req.op) {
+		case QCEDEV_OPER_DEC:
+			if (qcedev_areq->err)
+				pstat->qcedev_dec_fail++;
+			else
+				pstat->qcedev_dec_success++;
+			break;
+		case QCEDEV_OPER_ENC:
+			if (qcedev_areq->err)
+				pstat->qcedev_enc_fail++;
+			else
+				pstat->qcedev_enc_success++;
+			break;
+		default:
+			break;
+		}
+	} else {
+		if (qcedev_areq->err)
+			pstat->qcedev_sha_fail++;
+		else
+			pstat->qcedev_sha_success++;
+	}
+
+	return qcedev_areq->err;
+}
+
+static int qcedev_sha_init(struct qcedev_async_req *areq,
+				struct qcedev_control *podev)
+{
+	struct qcedev_sha_ctxt *sha_ctxt = &areq->sha_op_req.ctxt;
+
+	memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
+	sha_ctxt->first_blk = 1;
+
+	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
+		memcpy(&sha_ctxt->digest[0],
+			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
+		sha_ctxt->diglen = SHA1_DIGEST_SIZE;
+	} else {
+		if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
+			memcpy(&sha_ctxt->digest[0],
+					&_std_init_vector_sha256_uint8[0],
+					SHA256_DIGEST_SIZE);
+			sha_ctxt->diglen = SHA256_DIGEST_SIZE;
+		}
+	}
+	return 0;
+}
+
+
+static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_control *podev)
+{
+	int err = 0;
+	int i = 0;
+	struct scatterlist sg_src[2];
+	uint32_t total;
+
+	uint8_t *user_src = NULL;
+	uint8_t *k_src = NULL;
+	uint8_t *k_buf_src = NULL;
+	uint8_t *k_align_src = NULL;
+
+	uint32_t sha_pad_len = 0;
+	uint32_t trailing_buf_len = 0;
+	uint32_t t_buf = qcedev_areq->sha_op_req.ctxt.trailing_buf_len;
+	uint32_t sha_block_size;
+
+	total = qcedev_areq->sha_op_req.data_len + t_buf;
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
+		sha_block_size = SHA1_BLOCK_SIZE;
+	else
+		sha_block_size = SHA256_BLOCK_SIZE;
+
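+	/*
+	 * The hash core consumes whole blocks; any residue shorter than
+	 * a block is accumulated in trailing_buf and prepended to the
+	 * next update (or flushed by the final), so a short total is
+	 * simply buffered here without touching the hardware.
+	 */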
+	if (total <= sha_block_size) {
+		uint32_t len =  qcedev_areq->sha_op_req.data_len;
+
+		i = 0;
+
+		k_src = &qcedev_areq->sha_op_req.ctxt.trailing_buf[t_buf];
+
+		/* Copy data from user src(s) */
+		while (len > 0) {
+			user_src =
+			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+			if (user_src && __copy_from_user(k_src,
+				(void __user *)user_src,
+				qcedev_areq->sha_op_req.data[i].len))
+				return -EFAULT;
+
+			len -= qcedev_areq->sha_op_req.data[i].len;
+			k_src += qcedev_areq->sha_op_req.data[i].len;
+			i++;
+		}
+		qcedev_areq->sha_op_req.ctxt.trailing_buf_len = total;
+
+		return 0;
+	}
+
+
+	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
+				GFP_KERNEL);
+	if (k_buf_src == NULL)
+		return -ENOMEM;
+
+	k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
+							CACHE_LINE_SIZE);
+	k_src = k_align_src;
+
+	/* check for trailing buffer from previous updates and append it */
+	if (t_buf > 0) {
+		memcpy(k_src, &qcedev_areq->sha_op_req.ctxt.trailing_buf[0],
+								t_buf);
+		k_src += t_buf;
+	}
+
+	/* Copy data from user src(s) */
+	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
+	if (user_src && __copy_from_user(k_src,
+				(void __user *)user_src,
+				qcedev_areq->sha_op_req.data[0].len)) {
+		kfree(k_buf_src);
+		return -EFAULT;
+	}
+	k_src += qcedev_areq->sha_op_req.data[0].len;
+	for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
+		user_src = (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+		if (user_src && __copy_from_user(k_src,
+					(void __user *)user_src,
+					qcedev_areq->sha_op_req.data[i].len)) {
+			kfree(k_buf_src);
+			return -EFAULT;
+		}
+		k_src += qcedev_areq->sha_op_req.data[i].len;
+	}
+
+	/*  get new trailing buffer */
+	sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
+	trailing_buf_len =  CE_SHA_BLOCK_SIZE - sha_pad_len;
+
+	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src[0];
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src,
+						total-trailing_buf_len);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;
+
+	/*  update sha_ctxt trailing buf content to new trailing buf */
+	if (trailing_buf_len > 0) {
+		memset(&qcedev_areq->sha_op_req.ctxt.trailing_buf[0], 0, 64);
+		memcpy(&qcedev_areq->sha_op_req.ctxt.trailing_buf[0],
+			(k_src - trailing_buf_len),
+			trailing_buf_len);
+	}
+	qcedev_areq->sha_op_req.ctxt.trailing_buf_len = trailing_buf_len;
+
+	err = submit_req(qcedev_areq, podev);
+
+	qcedev_areq->sha_op_req.ctxt.last_blk = 0;
+	qcedev_areq->sha_op_req.ctxt.first_blk = 0;
+
+	kfree(k_buf_src);
+	return err;
+}
+
+static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_control *podev)
+{
+	int err = 0;
+	int i = 0;
+	int j = 0;
+	int k = 0;
+	int num_entries = 0;
+	uint32_t total = 0;
+
+	/* verify address src(s) */
+	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++)
+		if (!access_ok(VERIFY_READ,
+			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr,
+			qcedev_areq->sha_op_req.data[i].len))
+			return -EFAULT;
+
+	if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {
+
+		struct qcedev_sha_op_req *saved_req;
+		struct qcedev_sha_op_req req;
+		struct qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;
+
+		/* save the original req structure */
+		saved_req =
+			kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL);
+		if (saved_req == NULL) {
+			printk(KERN_ERR "%s: Cannot allocate saved_req\n",
+							__func__);
+			return -ENOMEM;
+		}
+		memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req));
+		memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req));
+
+		i = 0;
+		/* Address 32 KB at a time */
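+		/*
+		 * Split strategy: an entry larger than QCE_MAX_OPER_DATA
+		 * is consumed QCE_MAX_OPER_DATA bytes at a time;
+		 * otherwise consecutive entries are coalesced until the
+		 * cap is reached, with any partially consumed entry
+		 * shrunk and revisited on the next pass.
+		 */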
+		while ((i < req.entries) && (err == 0)) {
+			if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
+				sreq->data[0].len = QCE_MAX_OPER_DATA;
+				if (i > 0) {
+					sreq->data[0].vaddr =
+							sreq->data[i].vaddr;
+				}
+
+				sreq->data_len = QCE_MAX_OPER_DATA;
+				sreq->entries = 1;
+
+				err = qcedev_sha_update_max_xfer(qcedev_areq,
+									podev);
+
+				sreq->data[i].len = req.data[i].len -
+							QCE_MAX_OPER_DATA;
+				sreq->data[i].vaddr = req.data[i].vaddr +
+							QCE_MAX_OPER_DATA;
+				req.data[i].vaddr = sreq->data[i].vaddr;
+				req.data[i].len = sreq->data[i].len;
+			} else {
+				total = 0;
+				for (j = i; j < req.entries; j++) {
+					num_entries++;
+					if ((total + sreq->data[j].len) >=
+							QCE_MAX_OPER_DATA) {
+						sreq->data[j].len =
+						(QCE_MAX_OPER_DATA - total);
+						total = QCE_MAX_OPER_DATA;
+						break;
+					}
+					total += sreq->data[j].len;
+				}
+
+				sreq->data_len = total;
+				if (i > 0)
+					for (k = 0; k < num_entries; k++) {
+						sreq->data[k].len =
+							sreq->data[i+k].len;
+						sreq->data[k].vaddr =
+							sreq->data[i+k].vaddr;
+					}
+				sreq->entries = num_entries;
+
+				i = j;
+				err = qcedev_sha_update_max_xfer(qcedev_areq,
+									podev);
+				num_entries = 0;
+
+				sreq->data[i].vaddr = req.data[i].vaddr +
+							sreq->data[i].len;
+				sreq->data[i].len = req.data[i].len -
+							sreq->data[i].len;
+				req.data[i].vaddr = sreq->data[i].vaddr;
+				req.data[i].len = sreq->data[i].len;
+
+				if (sreq->data[i].len == 0)
+					i++;
+			}
+		} /* end of while ((i < req.entries) && (err == 0)) */
+
+		/* Restore the original req structure */
+		for (i = 0; i < saved_req->entries; i++) {
+			sreq->data[i].len = saved_req->data[i].len;
+			sreq->data[i].vaddr = saved_req->data[i].vaddr;
+		}
+		sreq->entries = saved_req->entries;
+		sreq->data_len = saved_req->data_len;
+		kfree(saved_req);
+	} else
+		err = qcedev_sha_update_max_xfer(qcedev_areq, podev);
+
+	return err;
+}
+
+static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_control *podev)
+{
+	int err = 0;
+	struct scatterlist sg_src;
+	uint32_t total;
+
+	uint8_t *k_buf_src = NULL;
+	uint8_t *k_align_src = NULL;
+
+	qcedev_areq->sha_op_req.ctxt.first_blk = 0;
+	qcedev_areq->sha_op_req.ctxt.last_blk = 1;
+
+	total = qcedev_areq->sha_op_req.ctxt.trailing_buf_len;
+
+	if (total) {
+		k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
+					GFP_KERNEL);
+		if (k_buf_src == NULL)
+			return -ENOMEM;
+
+		k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
+							CACHE_LINE_SIZE);
+		memcpy(k_align_src,
+				&qcedev_areq->sha_op_req.ctxt.trailing_buf[0],
+				total);
+	}
+	qcedev_areq->sha_op_req.ctxt.last_blk = 1;
+	qcedev_areq->sha_op_req.ctxt.first_blk = 0;
+
+	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, total);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = total;
+
+	err = submit_req(qcedev_areq, podev);
+
+	qcedev_areq->sha_op_req.ctxt.first_blk = 0;
+	qcedev_areq->sha_op_req.ctxt.last_blk = 0;
+	qcedev_areq->sha_op_req.ctxt.auth_data[0] = 0;
+	qcedev_areq->sha_op_req.ctxt.auth_data[1] = 0;
+	qcedev_areq->sha_op_req.ctxt.trailing_buf_len = 0;
+	memset(&qcedev_areq->sha_op_req.ctxt.trailing_buf[0], 0, 64);
+
+	kfree(k_buf_src);
+	return err;
+}
+
+static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
+					struct qcedev_control *podev)
+{
+	int err = 0;
+	int i = 0;
+	struct scatterlist sg_src[2];
+	uint32_t total;
+
+	uint8_t *user_src = NULL;
+	uint8_t *k_src = NULL;
+	uint8_t *k_buf_src = NULL;
+
+	total = qcedev_areq->sha_op_req.data_len;
+
+	/* verify address src(s) */
+	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++)
+		if (!access_ok(VERIFY_READ,
+			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr,
+			qcedev_areq->sha_op_req.data[i].len))
+			return -EFAULT;
+
+	/* Verify Source Address */
+	if (!access_ok(VERIFY_READ,
+				(void __user *)qcedev_areq->sha_op_req.authkey,
+				qcedev_areq->sha_op_req.authklen))
+			return -EFAULT;
+	if (__copy_from_user(&qcedev_areq->sha_op_req.ctxt.authkey[0],
+				(void __user *)qcedev_areq->sha_op_req.authkey,
+				qcedev_areq->sha_op_req.authklen))
+		return -EFAULT;
+
+
+	k_buf_src = kmalloc(total, GFP_KERNEL);
+	if (k_buf_src == NULL)
+		return -ENOMEM;
+
+	k_src = k_buf_src;
+
+	/* Copy data from user src(s) */
+	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
+		user_src =
+			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+		if (user_src && __copy_from_user(k_src, (void __user *)user_src,
+				qcedev_areq->sha_op_req.data[i].len)) {
+			kfree(k_buf_src);
+			return -EFAULT;
+		}
+		k_src += qcedev_areq->sha_op_req.data[i].len;
+	}
+
+	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src[0];
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_buf_src, total);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = total;
+	qcedev_areq->sha_op_req.ctxt.diglen = qcedev_areq->sha_op_req.diglen;
+	err = submit_req(qcedev_areq, podev);
+
+	kfree(k_buf_src);
+	return err;
+}
+
+static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
+					struct qcedev_control *podev)
+{
+	int err = 0;
+
+	if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
+		/* Verify Source Address */
+		if (!access_ok(VERIFY_READ,
+				(void __user *)areq->sha_op_req.authkey,
+				areq->sha_op_req.authklen))
+			return -EFAULT;
+		if (__copy_from_user(&areq->sha_op_req.ctxt.authkey[0],
+				(void __user *)areq->sha_op_req.authkey,
+				areq->sha_op_req.authklen))
+			return -EFAULT;
+	} else {
+		struct qcedev_async_req authkey_areq;
+
+		init_completion(&authkey_areq.complete);
+
+		authkey_areq.sha_op_req.entries = 1;
+		authkey_areq.sha_op_req.data[0].vaddr =
+						areq->sha_op_req.authkey;
+		authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
+		authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
+		authkey_areq.sha_op_req.diglen = 0;
+		memset(&authkey_areq.sha_op_req.digest[0], 0,
+						QCEDEV_MAX_SHA_DIGEST);
+		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
+			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
+		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
+			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;
+
+		authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+
+		qcedev_sha_init(&authkey_areq, podev);
+		err = qcedev_sha_update(&authkey_areq, podev);
+		if (err)
+			return err;
+		err = qcedev_sha_final(&authkey_areq, podev);
+		if (err)
+			return err;
+		memcpy(&areq->sha_op_req.ctxt.authkey[0],
+				&authkey_areq.sha_op_req.ctxt.digest[0],
+				authkey_areq.sha_op_req.ctxt.diglen);
+	}
+	return err;
+}
+
+static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_control *podev)
+{
+	int err = 0;
+	struct scatterlist sg_src;
+	uint8_t *k_src = NULL;
+	uint32_t sha_block_size = 0;
+	uint32_t sha_digest_size = 0;
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
+		sha_digest_size = SHA1_DIGEST_SIZE;
+		sha_block_size = SHA1_BLOCK_SIZE;
+	} else {
+		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
+			sha_digest_size = SHA256_DIGEST_SIZE;
+			sha_block_size = SHA256_BLOCK_SIZE;
+		}
+	}
+	k_src = kmalloc(sha_block_size, GFP_KERNEL);
+	if (k_src == NULL)
+		return -ENOMEM;
+
+	/* check for trailing buffer from previous updates and append it */
+	memcpy(k_src, &qcedev_areq->sha_op_req.ctxt.trailing_buf[0],
+			qcedev_areq->sha_op_req.ctxt.trailing_buf_len);
+
+	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = sha_block_size;
+	memset(&qcedev_areq->sha_op_req.ctxt.trailing_buf[0], 0,
+							sha_block_size);
+	memcpy(&qcedev_areq->sha_op_req.ctxt.trailing_buf[0],
+					&qcedev_areq->sha_op_req.ctxt.digest[0],
+					sha_digest_size);
+	qcedev_areq->sha_op_req.ctxt.trailing_buf_len = sha_digest_size;
+
+	qcedev_areq->sha_op_req.ctxt.first_blk = 1;
+	qcedev_areq->sha_op_req.ctxt.last_blk = 0;
+	qcedev_areq->sha_op_req.ctxt.auth_data[0] = 0;
+	qcedev_areq->sha_op_req.ctxt.auth_data[1] = 0;
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
+		memcpy(&qcedev_areq->sha_op_req.ctxt.digest[0],
+			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
+		qcedev_areq->sha_op_req.ctxt.diglen = SHA1_DIGEST_SIZE;
+	}
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
+		memcpy(&qcedev_areq->sha_op_req.ctxt.digest[0],
+			&_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
+		qcedev_areq->sha_op_req.ctxt.diglen = SHA256_DIGEST_SIZE;
+	}
+	err = submit_req(qcedev_areq, podev);
+
+	qcedev_areq->sha_op_req.ctxt.last_blk = 0;
+	qcedev_areq->sha_op_req.ctxt.first_blk = 0;
+
+	kfree(k_src);
+	return err;
+}
+
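+/*
+ * Software HMAC fallback (RFC 2104): HMAC(K, m) =
+ * H((K ^ opad) || H((K ^ ipad) || m)), where ipad bytes are 0x36 and
+ * opad bytes are 0x5c. This helper loads trailing_buf with the key
+ * XORed against the requested constant so the inner and outer hashes
+ * can be run as ordinary SHA passes when the engine lacks native HMAC
+ * support.
+ */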
+static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
+				struct qcedev_control *podev, bool ikey)
+{
+	int i;
+	uint32_t constant;
+	uint32_t sha_block_size;
+
+	if (ikey)
+		constant = 0x36;
+	else
+		constant = 0x5c;
+
+	if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
+		sha_block_size = SHA1_BLOCK_SIZE;
+	else
+		sha_block_size = SHA256_BLOCK_SIZE;
+
+	memset(&areq->sha_op_req.ctxt.trailing_buf[0], 0, sha_block_size);
+	for (i = 0; i < sha_block_size; i++)
+		areq->sha_op_req.ctxt.trailing_buf[i] =
+				(areq->sha_op_req.ctxt.authkey[i] ^ constant);
+
+	areq->sha_op_req.ctxt.trailing_buf_len = sha_block_size;
+	return 0;
+}
+
+static int qcedev_hmac_init(struct qcedev_async_req *areq,
+				struct qcedev_control *podev)
+{
+	int err;
+
+	qcedev_sha_init(areq, podev);
+	err = qcedev_set_hmac_auth_key(areq, podev);
+	if (err)
+		return err;
+	if (!podev->ce_support.sha_hmac)
+		qcedev_hmac_update_iokey(areq, podev, true);
+	return 0;
+}
+
+static int qcedev_hmac_final(struct qcedev_async_req *areq,
+				struct qcedev_control *podev)
+{
+	int err;
+
+	err = qcedev_sha_final(areq, podev);
+	if (podev->ce_support.sha_hmac)
+		return err;
+
+	qcedev_hmac_update_iokey(areq, podev, false);
+	err = qcedev_hmac_get_ohash(areq, podev);
+	if (err)
+		return err;
+	err = qcedev_sha_final(areq, podev);
+
+	return err;
+}
+
+static int qcedev_hash_init(struct qcedev_async_req *areq,
+				struct qcedev_control *podev)
+{
+	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
+		return qcedev_sha_init(areq, podev);
+	else
+		return qcedev_hmac_init(areq, podev);
+}
+
+static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_control *podev)
+{
+	return qcedev_sha_update(qcedev_areq, podev);
+}
+
+static int qcedev_hash_final(struct qcedev_async_req *areq,
+				struct qcedev_control *podev)
+{
+	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
+		return qcedev_sha_final(areq, podev);
+	else
+		return qcedev_hmac_final(areq, podev);
+}
+
+static int qcedev_pmem_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
+						struct qcedev_control *podev)
+{
+	int i = 0;
+	int err = 0;
+	struct scatterlist *sg_src = NULL;
+	struct scatterlist *sg_dst = NULL;
+	struct scatterlist *sg_ndex = NULL;
+	struct file *file_src = NULL;
+	struct file *file_dst = NULL;
+	unsigned long paddr;
+	unsigned long kvaddr;
+	unsigned long len;
+
+	sg_src = kmalloc((sizeof(struct scatterlist) *
+				areq->cipher_op_req.entries), GFP_KERNEL);
+	if (sg_src == NULL) {
+		printk(KERN_ERR "%s: Cannot allocate sg_src\n", __func__);
+		return -ENOMEM;
+	}
+	memset(sg_src, 0, (sizeof(struct scatterlist) *
+				areq->cipher_op_req.entries));
+	sg_ndex = sg_src;
+	areq->cipher_req.creq.src = sg_src;
+
+	/* address src */
+	get_pmem_file(areq->cipher_op_req.pmem.fd_src, &paddr,
+					&kvaddr, &len, &file_src);
+
+	for (i = 0; i < areq->cipher_op_req.entries; i++) {
+		sg_set_buf(sg_ndex,
+		((uint8_t *)(areq->cipher_op_req.pmem.src[i].offset) + kvaddr),
+		areq->cipher_op_req.pmem.src[i].len);
+		sg_ndex++;
+	}
+	sg_mark_end(--sg_ndex);
+
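+	/*
+	 * The user-supplied offsets are rebased twice: kernel virtual
+	 * addresses (offset + kvaddr) feed the scatterlist above, while
+	 * the same offsets become physical addresses (offset + paddr)
+	 * below for the pmem-backed DMA path.
+	 */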
+	for (i = 0; i < areq->cipher_op_req.entries; i++)
+		areq->cipher_op_req.pmem.src[i].offset += (uint32_t)paddr;
+
+	/* address dst */
+	/* If not in-place encryption/decryption */
+	if (areq->cipher_op_req.in_place_op != 1) {
+		sg_dst = kmalloc((sizeof(struct scatterlist) *
+				areq->cipher_op_req.entries), GFP_KERNEL);
+		if (sg_dst == NULL) {
+			kfree(sg_src);
+			if (file_src)
+				put_pmem_file(file_src);
+			return -ENOMEM;
+		}
+		memset(sg_dst, 0, (sizeof(struct scatterlist) *
+					areq->cipher_op_req.entries));
+		areq->cipher_req.creq.dst = sg_dst;
+		sg_ndex = sg_dst;
+
+		get_pmem_file(areq->cipher_op_req.pmem.fd_dst, &paddr,
+					&kvaddr, &len, &file_dst);
+		for (i = 0; i < areq->cipher_op_req.entries; i++)
+			sg_set_buf(sg_ndex++,
+			((uint8_t *)(areq->cipher_op_req.pmem.dst[i].offset)
+			+ kvaddr), areq->cipher_op_req.pmem.dst[i].len);
+		sg_mark_end(--sg_ndex);
+
+		for (i = 0; i < areq->cipher_op_req.entries; i++)
+			areq->cipher_op_req.pmem.dst[i].offset +=
+							(uint32_t)paddr;
+	} else {
+		areq->cipher_req.creq.dst = sg_src;
+		for (i = 0; i < areq->cipher_op_req.entries; i++) {
+			areq->cipher_op_req.pmem.dst[i].offset =
+				areq->cipher_op_req.pmem.src[i].offset;
+			areq->cipher_op_req.pmem.dst[i].len =
+				areq->cipher_op_req.pmem.src[i].len;
+		}
+	}
+
+	areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
+	areq->cipher_req.creq.info = areq->cipher_op_req.iv;
+
+	err = submit_req(areq, podev);
+
+	kfree(sg_src);
+	kfree(sg_dst);
+
+	if (file_dst)
+		put_pmem_file(file_dst);
+	if (file_src)
+		put_pmem_file(file_src);
+
+	return err;
+}
+
+
+static int qcedev_pmem_ablk_cipher(struct qcedev_async_req *qcedev_areq,
+						struct qcedev_control *podev)
+{
+	int err = 0;
+	int i = 0;
+	int j = 0;
+	int k = 0;
+	int num_entries = 0;
+	uint32_t total = 0;
+	struct qcedev_cipher_op_req *saved_req;
+	struct qcedev_cipher_op_req *creq = &qcedev_areq->cipher_op_req;
+
+	saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
+	if (saved_req == NULL) {
+		printk(KERN_ERR "%s: Cannot allocate saved_req\n", __func__);
+		return -ENOMEM;
+	}
+	memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));
+
+	if (qcedev_areq->cipher_op_req.data_len > QCE_MAX_OPER_DATA) {
+
+		struct qcedev_cipher_op_req req;
+
+		/* save the original req structure */
+		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));
+
+		i = 0;
+		/* Address 32 KB  at a time */
+		while ((i < req.entries) && (err == 0)) {
+			if (creq->pmem.src[i].len > QCE_MAX_OPER_DATA) {
+				creq->pmem.src[0].len =	QCE_MAX_OPER_DATA;
+				if (i > 0) {
+					creq->pmem.src[0].offset =
+						creq->pmem.src[i].offset;
+				}
+
+				creq->data_len = QCE_MAX_OPER_DATA;
+				creq->entries = 1;
+
+				err =
+				qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq,
+								podev);
+
+				creq->pmem.src[i].len =	req.pmem.src[i].len -
+							QCE_MAX_OPER_DATA;
+				creq->pmem.src[i].offset =
+						req.pmem.src[i].offset +
+						QCE_MAX_OPER_DATA;
+				req.pmem.src[i].offset =
+						creq->pmem.src[i].offset;
+				req.pmem.src[i].len = creq->pmem.src[i].len;
+			} else {
+				total = 0;
+				for (j = i; j < req.entries; j++) {
+					num_entries++;
+					if ((total + creq->pmem.src[j].len)
+							>= QCE_MAX_OPER_DATA) {
+						creq->pmem.src[j].len =
+						QCE_MAX_OPER_DATA - total;
+						total = QCE_MAX_OPER_DATA;
+						break;
+					}
+					total += creq->pmem.src[j].len;
+				}
+
+				creq->data_len = total;
+				if (i > 0)
+					for (k = 0; k < num_entries; k++) {
+						creq->pmem.src[k].len =
+						creq->pmem.src[i+k].len;
+						creq->pmem.src[k].offset =
+						creq->pmem.src[i+k].offset;
+					}
+				creq->entries =  num_entries;
+
+				i = j;
+				err =
+				qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq,
+								podev);
+				num_entries = 0;
+
+				creq->pmem.src[i].offset =
+						req.pmem.src[i].offset +
+						creq->pmem.src[i].len;
+				creq->pmem.src[i].len =
+						req.pmem.src[i].len -
+						creq->pmem.src[i].len;
+				req.pmem.src[i].offset =
+						creq->pmem.src[i].offset;
+				req.pmem.src[i].len =
+						creq->pmem.src[i].len;
+
+				if (creq->pmem.src[i].len == 0)
+					i++;
+			}
+
+		} /* end of while ((i < req.entries) && (err == 0)) */
+
+	} else
+		err = qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq, podev);
+
+	/* Restore the original req structure */
+	for (i = 0; i < saved_req->entries; i++) {
+		creq->pmem.src[i].len = saved_req->pmem.src[i].len;
+		creq->pmem.src[i].offset = saved_req->pmem.src[i].offset;
+	}
+	creq->entries = saved_req->entries;
+	creq->data_len = saved_req->data_len;
+	kfree(saved_req);
+
+	return err;
+
+}
+
+static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
+				int *di, struct qcedev_control *podev,
+				uint8_t *k_align_src)
+{
+	int err = 0;
+	int i = 0;
+	int dst_i = *di;
+	struct scatterlist sg_src;
+	uint32_t byteoffset = 0;
+	uint8_t *user_src = NULL;
+	uint8_t *k_align_dst = k_align_src;
+	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;
+
+
+	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+		byteoffset = areq->cipher_op_req.byteoffset;
+
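+	/*
+	 * For AES-CTR, byteoffset places the user data at its original
+	 * offset within the first counter block of the staging buffer;
+	 * the leading filler bytes consume keystream but are skipped
+	 * when results are copied back to the destination vbufs.
+	 */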
+	user_src = (void __user *)areq->cipher_op_req.vbuf.src[0].vaddr;
+	if (user_src && __copy_from_user((k_align_src + byteoffset),
+				(void __user *)user_src,
+				areq->cipher_op_req.vbuf.src[0].len))
+		return -EFAULT;
+
+	k_align_src += areq->cipher_op_req.vbuf.src[0].len;
+
+	for (i = 1; i < areq->cipher_op_req.entries; i++) {
+		user_src =
+			(void __user *)areq->cipher_op_req.vbuf.src[i].vaddr;
+		if (user_src && __copy_from_user(k_align_src,
+					(void __user *)user_src,
+					areq->cipher_op_req.vbuf.src[i].len)) {
+			return -EFAULT;
+		}
+		k_align_src += areq->cipher_op_req.vbuf.src[i].len;
+	}
+
+	/* restore src beginning */
+	k_align_src = k_align_dst;
+	areq->cipher_op_req.data_len += byteoffset;
+
+	areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
+	areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;
+
+	/* In place encryption/decryption */
+	sg_set_buf(areq->cipher_req.creq.src,
+					k_align_dst,
+					areq->cipher_op_req.data_len);
+	sg_mark_end(areq->cipher_req.creq.src);
+
+	areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
+	areq->cipher_req.creq.info = areq->cipher_op_req.iv;
+	areq->cipher_op_req.entries = 1;
+
+	err = submit_req(areq, podev);
+
+	/* copy data to destination buffer*/
+	creq->data_len -= byteoffset;
+
+	while (creq->data_len > 0) {
+		if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
+			if (err == 0 && __copy_to_user(
+				(void __user *)creq->vbuf.dst[dst_i].vaddr,
+				(k_align_dst + byteoffset),
+				creq->vbuf.dst[dst_i].len))
+				return -EFAULT;
+
+			k_align_dst += creq->vbuf.dst[dst_i].len +
+						byteoffset;
+			creq->data_len -= creq->vbuf.dst[dst_i].len;
+			dst_i++;
+		} else {
+			if (err == 0 && __copy_to_user(
+				(void __user *)creq->vbuf.dst[dst_i].vaddr,
+				(k_align_dst + byteoffset),
+				creq->data_len))
+				return -EFAULT;
+
+			k_align_dst += creq->data_len;
+			creq->vbuf.dst[dst_i].len -= creq->data_len;
+			creq->vbuf.dst[dst_i].vaddr += creq->data_len;
+			creq->data_len = 0;
+		}
+	}
+	*di = dst_i;
+
+	return err;
+};
+
+static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
+						struct qcedev_control *podev)
+{
+	int err = 0;
+	int di = 0;
+	int i = 0;
+	int j = 0;
+	int k = 0;
+	uint32_t byteoffset = 0;
+	int num_entries = 0;
+	uint32_t total = 0;
+	uint32_t len;
+	uint8_t *k_buf_src = NULL;
+	uint8_t *k_align_src = NULL;
+	uint32_t max_data_xfer;
+	struct qcedev_cipher_op_req *saved_req;
+	struct	qcedev_cipher_op_req *creq = &areq->cipher_op_req;
+
+	/* Verify source addresses */
+	for (i = 0; i < areq->cipher_op_req.entries; i++)
+		if (!access_ok(VERIFY_READ,
+			(void __user *)areq->cipher_op_req.vbuf.src[i].vaddr,
+					areq->cipher_op_req.vbuf.src[i].len))
+			return -EFAULT;
+
+	/* Verify destination addresses (these buffers are written to) */
+	if (areq->cipher_op_req.in_place_op != 1)
+		for (i = 0; i < areq->cipher_op_req.entries; i++)
+			if (!access_ok(VERIFY_WRITE,
+			(void __user *)areq->cipher_op_req.vbuf.dst[i].vaddr,
+					areq->cipher_op_req.vbuf.dst[i].len))
+				return -EFAULT;
+
+	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+		byteoffset = areq->cipher_op_req.byteoffset;
+	k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
+				GFP_KERNEL);
+	if (k_buf_src == NULL) {
+		printk(KERN_ERR "%s: Can't allocate memory for k_buf_src\n",
+			__func__);
+		return -ENOMEM;
+	}
+	k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
+							CACHE_LINE_SIZE);
+	max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;
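+	/*
+	 * The staging buffer holds at most QCE_MAX_OPER_DATA (32 KB); the
+	 * first chunk loses byteoffset bytes of that capacity because CTR
+	 * data is staged at that offset within the buffer.
+	 */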
+
+	saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
+	if (saved_req == NULL) {
+		printk(KERN_ERR "%s: Can't allocate memory for saved_req\n",
+			__func__);
+		kfree(k_buf_src);
+		return -ENOMEM;
+
+	}
+	memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));
+
+	if (areq->cipher_op_req.data_len > max_data_xfer) {
+		struct qcedev_cipher_op_req req;
+
+		/* save the original req structure */
+		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));
+
+		i = 0;
+		/* Process the request 32 KB (QCE_MAX_OPER_DATA) at a time */
+		while ((i < req.entries) && (err == 0)) {
+			if (creq->vbuf.src[i].len > max_data_xfer) {
+				creq->vbuf.src[0].len =	max_data_xfer;
+				if (i > 0) {
+					creq->vbuf.src[0].vaddr =
+						creq->vbuf.src[i].vaddr;
+				}
+
+				creq->data_len = max_data_xfer;
+				creq->entries = 1;
+
+				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
+						&di, podev, k_align_src);
+				if (err < 0) {
+					kfree(k_buf_src);
+					kfree(saved_req);
+					return err;
+				}
+
+				creq->vbuf.src[i].len =	req.vbuf.src[i].len -
+							max_data_xfer;
+				creq->vbuf.src[i].vaddr =
+						req.vbuf.src[i].vaddr +
+						max_data_xfer;
+				req.vbuf.src[i].vaddr =
+						creq->vbuf.src[i].vaddr;
+				req.vbuf.src[i].len = creq->vbuf.src[i].len;
+
+			} else {
+				total = areq->cipher_op_req.byteoffset;
+				for (j = i; j < req.entries; j++) {
+					num_entries++;
+					if ((total + creq->vbuf.src[j].len)
+							>= max_data_xfer) {
+						creq->vbuf.src[j].len =
+						max_data_xfer - total;
+						total = max_data_xfer;
+						break;
+					}
+					total += creq->vbuf.src[j].len;
+				}
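+				/*
+				 * Entries i..j now add up to at most one
+				 * 32 KB chunk; entry j may have been
+				 * truncated to fit, and its remainder is
+				 * carried into the next iteration below.
+				 */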
+
+				creq->data_len = total;
+				if (i > 0)
+					for (k = 0; k < num_entries; k++) {
+						creq->vbuf.src[k].len =
+						creq->vbuf.src[i+k].len;
+						creq->vbuf.src[k].vaddr =
+						creq->vbuf.src[i+k].vaddr;
+					}
+				creq->entries = num_entries;
+
+				i = j;
+				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
+						&di, podev, k_align_src);
+				if (err < 0) {
+					kfree(k_buf_src);
+					kfree(saved_req);
+					return err;
+				}
+
+				num_entries = 0;
+				areq->cipher_op_req.byteoffset = 0;
+
+				creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
+					+ creq->vbuf.src[i].len;
+				creq->vbuf.src[i].len =	req.vbuf.src[i].len -
+							creq->vbuf.src[i].len;
+
+				req.vbuf.src[i].vaddr =
+						creq->vbuf.src[i].vaddr;
+				req.vbuf.src[i].len = creq->vbuf.src[i].len;
+
+				if (creq->vbuf.src[i].len == 0)
+					i++;
+			}
+
+			areq->cipher_op_req.byteoffset = 0;
+			max_data_xfer = QCE_MAX_OPER_DATA;
+			byteoffset = 0;
+
+		} /* end of while ((i < req.entries) && (err == 0)) */
+	} else
+		err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, podev,
+								k_align_src);
+
+	/* Restore the original req structure */
+	for (i = 0; i < saved_req->entries; i++) {
+		creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
+		creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
+	}
+	for (len = 0, i = 0; len < saved_req->data_len; i++) {
+		creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
+		creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
+		len += saved_req->vbuf.dst[i].len;
+	}
+	creq->entries = saved_req->entries;
+	creq->data_len = saved_req->data_len;
+	creq->byteoffset = saved_req->byteoffset;
+
+	kfree(saved_req);
+	kfree(k_buf_src);
+	return err;
+
+}
+
+static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
+						struct qcedev_control *podev)
+{
+	if ((req->entries == 0) || (req->data_len == 0))
+		goto error;
+	if ((req->alg >= QCEDEV_ALG_LAST) ||
+		(req->mode >= QCEDEV_AES_DES_MODE_LAST))
+		goto error;
+	if (req->alg == QCEDEV_ALG_AES) {
+		if ((req->mode == QCEDEV_AES_MODE_XTS) &&
+					(!podev->ce_support.aes_xts))
+			goto error;
+		/* A zero-length key means either a no-key operation or the
+		 * HW-fused key: the key bytes must all be zero, and unless
+		 * this is a *_NO_KEY op the target must support HW keys.
+		 */
+		if (req->encklen == 0) {
+			int i;
+			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++)
+				if (req->enckey[i])
+					goto error;
+			if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
+				(req->op != QCEDEV_OPER_DEC_NO_KEY))
+				if (!podev->platform_support.hw_key_support)
+					goto error;
+		} else {
+			if (req->encklen == QCEDEV_AES_KEY_192) {
+				if (!podev->ce_support.aes_key_192)
+					goto error;
+			} else {
+				/* if not using HW key make sure key
+				 * length is valid
+				 */
+				if (!((req->encklen == QCEDEV_AES_KEY_128) ||
+					(req->encklen == QCEDEV_AES_KEY_256)))
+					goto error;
+			}
+		}
+	}
+	/* if using a byteoffset, make sure it is CTR mode using vbuf */
+	if (req->byteoffset) {
+		if (req->mode != QCEDEV_AES_MODE_CTR)
+			goto error;
+		else { /* if using CTR mode make sure not using Pmem */
+			if (req->use_pmem)
+				goto error;
+		}
+	}
+	/* if using PMEM, the operation must be in place */
+	if (req->use_pmem) {
+		if (!req->in_place_op)
+			goto error;
+	}
+	/* ECB mode requires a zero ivlen; all other modes a non-zero one */
+	if (req->ivlen != 0) {
+		if ((req->mode == QCEDEV_AES_MODE_ECB) ||
+				(req->mode == QCEDEV_DES_MODE_ECB))
+			goto error;
+	} else {
+		if ((req->mode != QCEDEV_AES_MODE_ECB) &&
+				(req->mode != QCEDEV_DES_MODE_ECB))
+			goto error;
+	}
+
+	return 0;
+error:
+	return -EINVAL;
+
+}
+
+static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
+						struct qcedev_control *podev)
+{
+	if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
+				(!podev->ce_support.cmac))
+		goto sha_error;
+
+	if ((req->entries == 0) || (req->data_len == 0))
+		goto sha_error;
+
+	if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST)
+		goto sha_error;
+
+	return 0;
+sha_error:
+	return -EINVAL;
+}
+
+static long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+	int err = 0;
+	struct qcedev_control *podev;
+	struct qcedev_async_req qcedev_areq;
+	struct qcedev_stat *pstat;
+
+	podev =  file->private_data;
+	if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
+		printk(KERN_ERR "%s: invalid handle %p\n",
+			__func__, podev);
+		return -ENOENT;
+	}
+
+	/* Verify user arguments. */
+	if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC)
+		return -ENOTTY;
+
+	init_completion(&qcedev_areq.complete);
+	pstat = &_qcedev_stat[podev->pdev->id];
+
+	switch (cmd) {
+	case QCEDEV_IOCTL_LOCK_CE:
+		err = qcedev_lock_ce(podev);
+		break;
+	case QCEDEV_IOCTL_UNLOCK_CE:
+		err = qcedev_unlock_ce(podev);
+		break;
+	case QCEDEV_IOCTL_ENC_REQ:
+	case QCEDEV_IOCTL_DEC_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+				sizeof(struct qcedev_cipher_op_req)))
+			return -EFAULT;
+
+		if (__copy_from_user(&qcedev_areq.cipher_op_req,
+				(void __user *)arg,
+				sizeof(struct qcedev_cipher_op_req)))
+			return -EFAULT;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_CIPHER;
+
+		if (qcedev_check_cipher_params(&qcedev_areq.cipher_op_req,
+				podev))
+			return -EINVAL;
+
+		if (qcedev_areq.cipher_op_req.use_pmem == QCEDEV_USE_PMEM)
+			err = qcedev_pmem_ablk_cipher(&qcedev_areq, podev);
+		else
+			err = qcedev_vbuf_ablk_cipher(&qcedev_areq, podev);
+		if (err)
+			return err;
+		if (__copy_to_user((void __user *)arg,
+					&qcedev_areq.cipher_op_req,
+					sizeof(struct qcedev_cipher_op_req)))
+				return -EFAULT;
+		break;
+
+	case QCEDEV_IOCTL_SHA_INIT_REQ:
+
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+				sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+
+		if (__copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+			return -EINVAL;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+		err = qcedev_hash_init(&qcedev_areq, podev);
+		if (err)
+			return err;
+		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+				return -EFAULT;
+		break;
+	case QCEDEV_IOCTL_GET_CMAC_REQ:
+		if (!podev->ce_support.cmac)
+			return -ENOTTY;
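+		/* fall through: AES-CMAC shares the update path below */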
+	case QCEDEV_IOCTL_SHA_UPDATE_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+				sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+
+		if (__copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+			return -EINVAL;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+
+		if (qcedev_areq.sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
+			err = qcedev_hash_cmac(&qcedev_areq, podev);
+			if (err)
+				return err;
+		} else {
+			err = qcedev_hash_update(&qcedev_areq, podev);
+			if (err)
+				return err;
+		}
+
+		memcpy(&qcedev_areq.sha_op_req.digest[0],
+				&qcedev_areq.sha_op_req.ctxt.digest[0],
+				qcedev_areq.sha_op_req.ctxt.diglen);
+		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		break;
+
+	case QCEDEV_IOCTL_SHA_FINAL_REQ:
+
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+				sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+
+		if (__copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+			return -EINVAL;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+		err = qcedev_hash_final(&qcedev_areq, podev);
+		if (err)
+			return err;
+		qcedev_areq.sha_op_req.diglen =
+				qcedev_areq.sha_op_req.ctxt.diglen;
+		memcpy(&qcedev_areq.sha_op_req.digest[0],
+				&qcedev_areq.sha_op_req.ctxt.digest[0],
+				qcedev_areq.sha_op_req.ctxt.diglen);
+		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		break;
+
+	case QCEDEV_IOCTL_GET_SHA_REQ:
+
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+				sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+
+		if (__copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+			return -EINVAL;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
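+		/* one-shot digest: init, update and final in a single call */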
+		qcedev_hash_init(&qcedev_areq, podev);
+		err = qcedev_hash_update(&qcedev_areq, podev);
+		if (err)
+			return err;
+		err = qcedev_hash_final(&qcedev_areq, podev);
+		if (err)
+			return err;
+		qcedev_areq.sha_op_req.diglen =
+				qcedev_areq.sha_op_req.ctxt.diglen;
+		memcpy(&qcedev_areq.sha_op_req.digest[0],
+				&qcedev_areq.sha_op_req.ctxt.digest[0],
+				qcedev_areq.sha_op_req.ctxt.diglen);
+		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		break;
+
+	default:
+		return -ENOTTY;
+	}
+
+	return err;
+}
+
+static int qcedev_probe(struct platform_device *pdev)
+{
+	void *handle = NULL;
+	int rc = 0;
+	struct qcedev_control *podev;
+	struct msm_ce_hw_support *platform_support;
+
+	if (pdev->id >= MAX_QCE_DEVICE) {
+		printk(KERN_ERR "%s: device id %d exceeds allowed %d\n",
+			__func__, pdev->id, MAX_QCE_DEVICE);
+		return -ENOENT;
+	}
+	podev = &qce_dev[pdev->id];
+
+	platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data;
+	podev->platform_support.ce_shared = platform_support->ce_shared;
+	podev->platform_support.shared_ce_resource =
+				platform_support->shared_ce_resource;
+	podev->platform_support.hw_key_support =
+				platform_support->hw_key_support;
+	podev->ce_locked = false;
+
+	INIT_LIST_HEAD(&podev->ready_commands);
+	podev->active_command = NULL;
+
+	spin_lock_init(&podev->lock);
+
+	tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);
+
+	/* open qce */
+	handle = qce_open(pdev, &rc);
+	if (handle == NULL) {
+		platform_set_drvdata(pdev, NULL);
+		return rc;
+	}
+
+	podev->qce = handle;
+	podev->pdev = pdev;
+	platform_set_drvdata(pdev, podev);
+	qce_hw_support(podev->qce, &podev->ce_support);
+	rc = misc_register(&podev->miscdevice);
+
+	if (rc >= 0)
+		return 0;
+
+	if (handle)
+		qce_close(handle);
+	platform_set_drvdata(pdev, NULL);
+	podev->qce = NULL;
+	podev->pdev = NULL;
+	return rc;
+};
+
+static int qcedev_remove(struct platform_device *pdev)
+{
+	struct qcedev_control *podev;
+
+	podev = platform_get_drvdata(pdev);
+	if (!podev)
+		return 0;
+	if (podev->qce)
+		qce_close(podev->qce);
+
+	if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
+		misc_deregister(&podev->miscdevice);
+	tasklet_kill(&podev->done_tasklet);
+	return 0;
+};
+
+static struct platform_driver qcedev_plat_driver = {
+	.probe = qcedev_probe,
+	.remove = qcedev_remove,
+	.driver = {
+		.name = "qce",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int _disp_stats(int id)
+{
+	struct qcedev_stat *pstat;
+	int len = 0;
+
+	pstat = &_qcedev_stat[id];
+	len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQualcomm QCE dev driver %d Statistics:\n",
+				id + 1);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Encryption operation success       : %d\n",
+					pstat->qcedev_enc_success);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Encryption operation fail   : %d\n",
+					pstat->qcedev_enc_fail);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Decryption operation success     : %d\n",
+					pstat->qcedev_dec_success);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Decryption operation fail   : %d\n",
+					pstat->qcedev_dec_fail);
+
+	return len;
+}
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int rc = -EINVAL;
+	int qcedev = *((int *) file->private_data);
+	int len;
+
+	len = _disp_stats(qcedev);
+
+	rc = simple_read_from_buffer((void __user *) buf, count,
+			ppos, (void *) _debug_read_buf, len);
+
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+
+	int qcedev = *((int *) file->private_data);
+
+	memset((char *)&_qcedev_stat[qcedev], 0, sizeof(struct qcedev_stat));
+	return count;
+};
+
+static const struct file_operations _debug_stats_ops = {
+	.open =         _debug_stats_open,
+	.read =         _debug_stats_read,
+	.write =        _debug_stats_write,
+};
+
+static int _qcedev_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	int i;
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcedev", NULL);
+	if (IS_ERR(_debug_dent)) {
+		pr_err("qcedev debugfs_create_dir fail, error %ld\n",
+				PTR_ERR(_debug_dent));
+		return PTR_ERR(_debug_dent);
+	}
+
+	for (i = 0; i < MAX_QCE_DEVICE; i++) {
+		snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", i+1);
+		_debug_qcedev[i] = i;
+		dent = debugfs_create_file(name, 0644, _debug_dent,
+				&_debug_qcedev[i], &_debug_stats_ops);
+		if (dent == NULL) {
+			pr_err("qcedev debugfs_create_file fail for %s\n",
+					name);
+			rc = -ENOMEM;
+			goto err;
+		}
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int qcedev_init(void)
+{
+	int rc;
+
+	rc = _qcedev_debug_init();
+	if (rc)
+		return rc;
+	return platform_driver_register(&qcedev_plat_driver);
+}
+
+static void qcedev_exit(void)
+{
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&qcedev_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm DEV Crypto driver");
+MODULE_VERSION("1.20");
+
+module_init(qcedev_init);
+module_exit(qcedev_exit);
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
new file mode 100644
index 0000000..c4fd64b
--- /dev/null
+++ b/drivers/crypto/msm/qcrypto.c
@@ -0,0 +1,3274 @@
+/* Qualcomm Crypto driver
+ *
+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+
+#include <mach/scm.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include "inc/qce.h"
+
+
+#define MAX_CRYPTO_DEVICE 3
+#define DEBUG_MAX_FNAME  16
+#define DEBUG_MAX_RW_BUF 1024
+
+struct crypto_stat {
+	u32 aead_sha1_aes_enc;
+	u32 aead_sha1_aes_dec;
+	u32 aead_sha1_des_enc;
+	u32 aead_sha1_des_dec;
+	u32 aead_sha1_3des_enc;
+	u32 aead_sha1_3des_dec;
+	u32 aead_op_success;
+	u32 aead_op_fail;
+	u32 ablk_cipher_aes_enc;
+	u32 ablk_cipher_aes_dec;
+	u32 ablk_cipher_des_enc;
+	u32 ablk_cipher_des_dec;
+	u32 ablk_cipher_3des_enc;
+	u32 ablk_cipher_3des_dec;
+	u32 ablk_cipher_op_success;
+	u32 ablk_cipher_op_fail;
+	u32 sha1_digest;
+	u32 sha256_digest;
+	u32 sha_op_success;
+	u32 sha_op_fail;
+	u32 sha1_hmac_digest;
+	u32 sha256_hmac_digest;
+	u32 sha_hmac_op_success;
+	u32 sha_hmac_op_fail;
+};
+static struct crypto_stat _qcrypto_stat[MAX_CRYPTO_DEVICE];
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+
+struct crypto_priv {
+	/* CE features supported by target device*/
+	struct msm_ce_hw_support platform_support;
+
+	/* CE features/algorithms supported by HW engine*/
+	struct ce_hw_support ce_support;
+	/* the lock protects queue and req*/
+	spinlock_t lock;
+
+	/* qce handle */
+	void *qce;
+
+	/* list of  registered algorithms */
+	struct list_head alg_list;
+
+	/* platform device */
+	struct platform_device *pdev;
+
+	/* current active request */
+	struct crypto_async_request *req;
+	int res;
+
+	/* request queue */
+	struct crypto_queue queue;
+
+	uint32_t ce_lock_count;
+
+	struct work_struct unlock_ce_ws;
+
+	struct tasklet_struct done_tasklet;
+};
+
+
+/*-------------------------------------------------------------------------
+* Resource Locking Service
+* ------------------------------------------------------------------------*/
+#define QCRYPTO_CMD_ID				1
+#define QCRYPTO_CE_LOCK_CMD			1
+#define QCRYPTO_CE_UNLOCK_CMD			0
+#define NUM_RETRY				1000
+#define CE_BUSY				        55
+
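+/*
+ * On targets where the crypto engine is shared with the secure world,
+ * a lock/unlock command is issued to TZ through an SCM call before and
+ * after the engine is used.  Without CONFIG_MSM_SCM the SCM call is
+ * stubbed out to return 0.
+ */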
+static int qcrypto_scm_cmd(int resource, int cmd, int *response)
+{
+#ifdef CONFIG_MSM_SCM
+
+	struct {
+		int resource;
+		int cmd;
+	} cmd_buf;
+
+	cmd_buf.resource = resource;
+	cmd_buf.cmd = cmd;
+
+	return scm_call(SCM_SVC_TZ, QCRYPTO_CMD_ID, &cmd_buf,
+		sizeof(cmd_buf), response, sizeof(*response));
+
+#else
+	return 0;
+#endif
+}
+
+static void qcrypto_unlock_ce(struct work_struct *work)
+{
+	int response = 0;
+	unsigned long flags;
+	struct crypto_priv *cp = container_of(work, struct crypto_priv,
+							unlock_ce_ws);
+	if (cp->ce_lock_count == 1)
+		BUG_ON(qcrypto_scm_cmd(cp->platform_support.shared_ce_resource,
+				QCRYPTO_CE_UNLOCK_CMD, &response) != 0);
+	spin_lock_irqsave(&cp->lock, flags);
+	cp->ce_lock_count--;
+	spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+static int qcrypto_lock_ce(struct crypto_priv *cp)
+{
+	unsigned long flags;
+	int response = -CE_BUSY;
+	int i = 0;
+
+	if (cp->ce_lock_count == 0) {
+		do {
+			if (qcrypto_scm_cmd(
+				cp->platform_support.shared_ce_resource,
+				QCRYPTO_CE_LOCK_CMD, &response)) {
+				response = -EINVAL;
+				break;
+			}
+		} while ((response == -CE_BUSY) && (i++ < NUM_RETRY));
+
+		if ((response == -CE_BUSY) && (i >= NUM_RETRY))
+			return -EUSERS;
+		if (response < 0)
+			return -EINVAL;
+	}
+	spin_lock_irqsave(&cp->lock, flags);
+	cp->ce_lock_count++;
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+
+	return 0;
+}
+
+enum qcrypto_alg_type {
+	QCRYPTO_ALG_CIPHER	= 0,
+	QCRYPTO_ALG_SHA	= 1,
+	QCRYPTO_ALG_LAST
+};
+
+struct qcrypto_alg {
+	struct list_head entry;
+	struct crypto_alg cipher_alg;
+	struct ahash_alg sha_alg;
+	enum qcrypto_alg_type alg_type;
+	struct crypto_priv *cp;
+};
+
+#define QCRYPTO_MAX_KEY_SIZE	64
+/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+#define QCRYPTO_MAX_IV_LENGTH	16
+
+struct qcrypto_cipher_ctx {
+	u8 auth_key[QCRYPTO_MAX_KEY_SIZE];
+	u8 iv[QCRYPTO_MAX_IV_LENGTH];
+
+	u8 enc_key[QCRYPTO_MAX_KEY_SIZE];
+	unsigned int enc_key_len;
+
+	unsigned int authsize;
+	unsigned int auth_key_len;
+
+	struct crypto_priv *cp;
+};
+
+struct qcrypto_cipher_req_ctx {
+	u8 *iv;
+	unsigned int ivsize;
+	int  aead;
+	struct scatterlist asg;		/* Formatted associated data sg  */
+	unsigned char *assoc;		/* Pointer to formatted assoc data */
+	unsigned int assoclen;		/* Save Unformatted assoc data length */
+	struct scatterlist *assoc_sg;	/* Save Unformatted assoc data sg */
+	enum qce_cipher_alg_enum alg;
+	enum qce_cipher_dir_enum dir;
+	enum qce_cipher_mode_enum mode;
+};
+
+#define SHA_MAX_BLOCK_SIZE      SHA256_BLOCK_SIZE
+#define SHA_MAX_STATE_SIZE	(SHA256_DIGEST_SIZE / sizeof(u32))
+#define SHA_MAX_DIGEST_SIZE	 SHA256_DIGEST_SIZE
+
+/* standard initialization vector for SHA-1, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha1_uint8[] = {
+	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
+	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
+	0xC3, 0xD2, 0xE1, 0xF0
+};
+
+/* standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha256_uint8[] = {
+	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
+	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
+	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
+	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
+};
+
+struct qcrypto_sha_ctx {
+	enum qce_hash_alg_enum  alg;
+	uint32_t		byte_count[4];
+	uint8_t			digest[SHA_MAX_DIGEST_SIZE];
+	uint32_t		diglen;
+	uint8_t			*tmp_tbuf;
+	uint8_t			*trailing_buf;
+	uint8_t			*in_buf;
+	uint32_t		authkey_in_len;
+	uint32_t		trailing_buf_len;
+	uint8_t			first_blk;
+	uint8_t			last_blk;
+	uint8_t			authkey[SHA_MAX_BLOCK_SIZE];
+	struct ahash_request *ahash_req;
+	struct completion ahash_req_complete;
+	struct scatterlist *sg;
+	struct scatterlist tmp_sg;
+	struct crypto_priv *cp;
+};
+
+struct qcrypto_sha_req_ctx {
+	union {
+		struct sha1_state sha1_state_ctx;
+		struct sha256_state sha256_state_ctx;
+	};
+	struct scatterlist *src;
+	uint32_t nbytes;
+};
+
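+/*
+ * Pack a big-endian byte stream into 32-bit words, e.g. bytes
+ * {0x12, 0x34, 0x56, 0x78} become the word 0x12345678; a trailing
+ * partial word is left-justified in the high-order bytes.  The inverse,
+ * _words_to_byte_stream(), unpacks words back into big-endian bytes.
+ */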
+static void _byte_stream_to_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned n;
+
+	n = len / sizeof(uint32_t);
+	for (; n > 0; n--) {
+		*iv =  ((*b << 24)      & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     |
+				(*(b+3)          & 0xff);
+		b += sizeof(uint32_t);
+		iv++;
+	}
+
+	n = len %  sizeof(uint32_t);
+	if (n == 3) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     ;
+	} else if (n == 2) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   ;
+	} else if (n == 1) {
+		*iv = ((*b << 24) & 0xff000000) ;
+	}
+}
+
+static void _words_to_byte_stream(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned n = len  / sizeof(uint32_t);
+
+	for (; n > 0; n--) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 8)    & 0xff);
+		*b++ = (unsigned char) (*iv           & 0xff);
+		iv++;
+	}
+	n = len % sizeof(uint32_t);
+	if (n == 3) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 8)    & 0xff);
+	} else if (n == 2) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 16)   & 0xff);
+	} else if (n == 1) {
+		*b =   (unsigned char) ((*iv >> 24)   & 0xff);
+	}
+}
+
+static void _start_qcrypto_process(struct crypto_priv *cp);
+
+static struct qcrypto_alg *_qcrypto_sha_alg_alloc(struct crypto_priv *cp,
+		struct ahash_alg *template)
+{
+	struct qcrypto_alg *q_alg;
+	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+	if (!q_alg) {
+		pr_err("qcrypto Memory allocation of q_alg FAIL\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	q_alg->alg_type = QCRYPTO_ALG_SHA;
+	q_alg->sha_alg = *template;
+	q_alg->cp = cp;
+
+	return q_alg;
+};
+
+static struct qcrypto_alg *_qcrypto_cipher_alg_alloc(struct crypto_priv *cp,
+		struct crypto_alg *template)
+{
+	struct qcrypto_alg *q_alg;
+
+	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+	if (!q_alg) {
+		pr_err("qcrypto Memory allocation of q_alg FAIL\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	q_alg->alg_type = QCRYPTO_ALG_CIPHER;
+	q_alg->cipher_alg = *template;
+	q_alg->cp = cp;
+
+	return q_alg;
+};
+
+static int _qcrypto_cipher_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct qcrypto_alg *q_alg;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	q_alg = container_of(alg, struct qcrypto_alg, cipher_alg);
+
+	/* update context with ptr to cp */
+	ctx->cp = q_alg->cp;
+
+	/* random first IV */
+	get_random_bytes(ctx->iv, QCRYPTO_MAX_IV_LENGTH);
+
+	return 0;
+};
+
+static int _qcrypto_ahash_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+	struct ahash_alg *alg =	container_of(crypto_hash_alg_common(ahash),
+						struct ahash_alg, halg);
+	struct qcrypto_alg *q_alg = container_of(alg, struct qcrypto_alg,
+								sha_alg);
+
+	crypto_ahash_set_reqsize(ahash, sizeof(struct qcrypto_sha_req_ctx));
+	/* update context with ptr to cp */
+	sha_ctx->cp = q_alg->cp;
+	sha_ctx->sg = NULL;
+	sha_ctx->tmp_tbuf = kzalloc(SHA_MAX_BLOCK_SIZE +
+					SHA_MAX_DIGEST_SIZE, GFP_KERNEL);
+	if (sha_ctx->tmp_tbuf == NULL) {
+		pr_err("qcrypto Can't Allocate mem: sha_ctx->tmp_tbuf\n");
+		return -ENOMEM;
+	}
+
+	sha_ctx->trailing_buf = kzalloc(SHA_MAX_BLOCK_SIZE, GFP_KERNEL);
+	if (sha_ctx->trailing_buf == NULL) {
+		kfree(sha_ctx->tmp_tbuf);
+		sha_ctx->tmp_tbuf = NULL;
+		pr_err("qcrypto Can't Allocate mem: sha_ctx->trailing_buf\n");
+		return -ENOMEM;
+	}
+
+	sha_ctx->ahash_req = NULL;
+	return 0;
+};
+
+static void _qcrypto_ahash_cra_exit(struct crypto_tfm *tfm)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+
+	kfree(sha_ctx->tmp_tbuf);
+	sha_ctx->tmp_tbuf = NULL;
+	kfree(sha_ctx->trailing_buf);
+	sha_ctx->trailing_buf = NULL;
+	if (sha_ctx->sg != NULL) {
+		kfree(sha_ctx->sg);
+		sha_ctx->sg = NULL;
+	}
+	if (sha_ctx->ahash_req != NULL) {
+		ahash_request_free(sha_ctx->ahash_req);
+		sha_ctx->ahash_req = NULL;
+	}
+};
+
+
+static void _crypto_sha_hmac_ahash_req_complete(
+	struct crypto_async_request *req, int err);
+
+static int _qcrypto_ahash_hmac_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+	int ret = 0;
+
+	ret = _qcrypto_ahash_cra_init(tfm);
+	if (ret)
+		return ret;
+	sha_ctx->ahash_req = ahash_request_alloc(ahash, GFP_KERNEL);
+
+	if (sha_ctx->ahash_req == NULL) {
+		_qcrypto_ahash_cra_exit(tfm);
+		return -ENOMEM;
+	}
+
+	init_completion(&sha_ctx->ahash_req_complete);
+	ahash_request_set_callback(sha_ctx->ahash_req,
+				CRYPTO_TFM_REQ_MAY_BACKLOG,
+				_crypto_sha_hmac_ahash_req_complete,
+				&sha_ctx->ahash_req_complete);
+	crypto_ahash_clear_flags(ahash, ~0);
+
+	return 0;
+};
+
+static int _qcrypto_cra_ablkcipher_init(struct crypto_tfm *tfm)
+{
+	tfm->crt_ablkcipher.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
+	return _qcrypto_cipher_cra_init(tfm);
+};
+
+static int _qcrypto_cra_aead_init(struct crypto_tfm *tfm)
+{
+	tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
+	return _qcrypto_cipher_cra_init(tfm);
+};
+
+static int _disp_stats(int id)
+{
+	struct crypto_stat *pstat;
+	int len = 0;
+
+	pstat = &_qcrypto_stat[id];
+	len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQualcomm crypto accelerator %d Statistics:\n",
+				id + 1);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK AES CIPHER encryption   : %d\n",
+					pstat->ablk_cipher_aes_enc);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK AES CIPHER decryption   : %d\n",
+					pstat->ablk_cipher_aes_dec);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK DES CIPHER encryption   : %d\n",
+					pstat->ablk_cipher_des_enc);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK DES CIPHER decryption   : %d\n",
+					pstat->ablk_cipher_des_dec);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK 3DES CIPHER encryption  : %d\n",
+					pstat->ablk_cipher_3des_enc);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK 3DES CIPHER decryption  : %d\n",
+					pstat->ablk_cipher_3des_dec);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER operation success: %d\n",
+					pstat->ablk_cipher_op_success);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER operation fail   : %d\n",
+					pstat->ablk_cipher_op_fail);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-AES encryption      : %d\n",
+					pstat->aead_sha1_aes_enc);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-AES decryption      : %d\n",
+					pstat->aead_sha1_aes_dec);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-DES encryption      : %d\n",
+					pstat->aead_sha1_des_enc);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-DES decryption      : %d\n",
+					pstat->aead_sha1_des_dec);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-3DES encryption     : %d\n",
+					pstat->aead_sha1_3des_enc);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-3DES decryption     : %d\n",
+					pstat->aead_sha1_3des_dec);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD operation success       : %d\n",
+					pstat->aead_op_success);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD operation fail          : %d\n",
+					pstat->aead_op_fail);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SHA1 digest			 : %d\n",
+					pstat->sha1_digest);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SHA256 digest		 : %d\n",
+					pstat->sha256_digest);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SHA  operation fail          : %d\n",
+					pstat->sha_op_fail);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SHA  operation success          : %d\n",
+					pstat->sha_op_success);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SHA1 HMAC digest			 : %d\n",
+					pstat->sha1_hmac_digest);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SHA256 HMAC digest		 : %d\n",
+					pstat->sha256_hmac_digest);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SHA HMAC operation fail          : %d\n",
+					pstat->sha_hmac_op_fail);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SHA HMAC operation success          : %d\n",
+					pstat->sha_hmac_op_success);
+	return len;
+}
+
+static int _qcrypto_remove(struct platform_device *pdev)
+{
+	struct crypto_priv *cp;
+	struct qcrypto_alg *q_alg;
+	struct qcrypto_alg *n;
+
+	cp = platform_get_drvdata(pdev);
+
+	if (!cp)
+		return 0;
+
+	list_for_each_entry_safe(q_alg, n, &cp->alg_list, entry) {
+		if (q_alg->alg_type == QCRYPTO_ALG_CIPHER)
+			crypto_unregister_alg(&q_alg->cipher_alg);
+		if (q_alg->alg_type == QCRYPTO_ALG_SHA)
+			crypto_unregister_ahash(&q_alg->sha_alg);
+		list_del(&q_alg->entry);
+		kfree(q_alg);
+	}
+
+	if (cp->qce)
+		qce_close(cp->qce);
+	tasklet_kill(&cp->done_tasklet);
+	kfree(cp);
+	return 0;
+};
+
+static int _qcrypto_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
+		unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	switch (len) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_256:
+		break;
+	case AES_KEYSIZE_192:
+		if (cp->ce_support.aes_key_192)
+			break;
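+		/* else fall through: 192-bit keys unsupported by this CE */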
+	default:
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	};
+	ctx->enc_key_len = len;
+	memcpy(ctx->enc_key, key, len);
+	return 0;
+};
+
+static int _qcrypto_setkey_des(struct crypto_ablkcipher *cipher, const u8 *key,
+		unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	u32 tmp[DES_EXPKEY_WORDS];
+	int ret = des_ekey(tmp, key);
+
+	if (len != DES_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	};
+
+	if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		return -EINVAL;
+	}
+
+	ctx->enc_key_len = len;
+	memcpy(ctx->enc_key, key, len);
+	return 0;
+};
+
+static int _qcrypto_setkey_3des(struct crypto_ablkcipher *cipher, const u8 *key,
+		unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (len != DES3_EDE_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	};
+	ctx->enc_key_len = len;
+	memcpy(ctx->enc_key, key, len);
+	return 0;
+};
+
+static void req_done(unsigned long data)
+{
+	struct crypto_async_request *areq;
+	struct crypto_priv *cp = (struct crypto_priv *)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	areq = cp->req;
+	cp->req = NULL;
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	if (areq)
+		areq->complete(areq, cp->res);
+	_start_qcrypto_process(cp);
+};
+
+static void _update_sha1_ctx(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+
+	if (sha_ctx->last_blk == 1)
+		memset(sha_state_ctx, 0x00, sizeof(struct sha1_state));
+	else {
+		memset(sha_state_ctx->buffer, 0x00, SHA1_BLOCK_SIZE);
+		memcpy(sha_state_ctx->buffer, sha_ctx->trailing_buf,
+						sha_ctx->trailing_buf_len);
+		_byte_stream_to_words(sha_state_ctx->state , sha_ctx->digest,
+					SHA1_DIGEST_SIZE);
+	}
+	return;
+}
+
+static void _update_sha256_ctx(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+
+	if (sha_ctx->last_blk == 1)
+		memset(sha_state_ctx, 0x00, sizeof(struct sha256_state));
+	else {
+		memset(sha_state_ctx->buf, 0x00, SHA256_BLOCK_SIZE);
+		memcpy(sha_state_ctx->buf, sha_ctx->trailing_buf,
+						sha_ctx->trailing_buf_len);
+		_byte_stream_to_words(sha_state_ctx->state, sha_ctx->digest,
+					SHA256_DIGEST_SIZE);
+	}
+	return;
+}
+
+static void _qce_ahash_complete(void *cookie, unsigned char *digest,
+		unsigned char *authdata, int ret)
+{
+	struct ahash_request *areq = (struct ahash_request *) cookie;
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(areq);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+	uint32_t diglen = crypto_ahash_digestsize(ahash);
+	uint32_t *auth32 = (uint32_t *)authdata;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qce_ahash_complete: %p ret %d\n",
+				areq, ret);
+#endif
+
+	if (digest) {
+		memcpy(sha_ctx->digest, digest, diglen);
+		memcpy(areq->result, digest, diglen);
+	}
+	if (authdata) {
+		sha_ctx->byte_count[0] = auth32[0];
+		sha_ctx->byte_count[1] = auth32[1];
+		sha_ctx->byte_count[2] = auth32[2];
+		sha_ctx->byte_count[3] = auth32[3];
+	}
+	areq->src = rctx->src;
+	areq->nbytes = rctx->nbytes;
+
+	if (sha_ctx->sg != NULL) {
+		kfree(sha_ctx->sg);
+		sha_ctx->sg = NULL;
+	}
+
+	if (sha_ctx->alg == QCE_HASH_SHA1)
+		_update_sha1_ctx(areq);
+	if (sha_ctx->alg == QCE_HASH_SHA256)
+		_update_sha256_ctx(areq);
+
+	sha_ctx->last_blk = 0;
+	sha_ctx->first_blk = 0;
+
+	if (ret) {
+		cp->res = -ENXIO;
+		pstat->sha_op_fail++;
+	} else {
+		cp->res = 0;
+		pstat->sha_op_success++;
+	}
+
+	if (cp->platform_support.ce_shared)
+		schedule_work(&cp->unlock_ce_ws);
+	tasklet_schedule(&cp->done_tasklet);
+};
+
+static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb,
+		unsigned char *iv, int ret)
+{
+	struct ablkcipher_request *areq = (struct ablkcipher_request *) cookie;
+	struct crypto_ablkcipher *ablk = crypto_ablkcipher_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qce_ablk_cipher_complete: %p ret %d\n",
+				areq, ret);
+#endif
+	if (iv)
+		memcpy(ctx->iv, iv, crypto_ablkcipher_ivsize(ablk));
+
+	if (ret) {
+		cp->res = -ENXIO;
+		pstat->ablk_cipher_op_fail++;
+	} else {
+		cp->res = 0;
+		pstat->ablk_cipher_op_success++;
+	}
+	if (cp->platform_support.ce_shared)
+		schedule_work(&cp->unlock_ce_ws);
+	tasklet_schedule(&cp->done_tasklet);
+};
+
+
+static void _qce_aead_complete(void *cookie, unsigned char *icv,
+				unsigned char *iv, int ret)
+{
+	struct aead_request *areq = (struct aead_request *) cookie;
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(areq);
+
+	if (rctx->mode == QCE_MODE_CCM) {
+		kzfree(rctx->assoc);
+		areq->assoc = rctx->assoc_sg;
+		areq->assoclen = rctx->assoclen;
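+		/*
+		 * For CCM the hardware reports authentication status in the
+		 * result code; 0x2000000 appears to be the MAC-mismatch
+		 * status bit and is mapped to -EBADMSG.
+		 */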
+		if (ret) {
+			if (ret == 0x2000000)
+				ret = -EBADMSG;
+			else
+				ret = -ENXIO;
+		}
+	} else {
+		if (ret == 0) {
+			if (rctx->dir  == QCE_ENCRYPT) {
+				/* copy the icv to dst */
+				scatterwalk_map_and_copy(icv, areq->dst,
+						areq->cryptlen,
+						ctx->authsize, 1);
+
+			} else {
+				unsigned char tmp[SHA256_DIGESTSIZE];
+
+				/* compare icv from src */
+				scatterwalk_map_and_copy(tmp,
+					areq->src, areq->cryptlen -
+					ctx->authsize, ctx->authsize, 0);
+				ret = memcmp(icv, tmp, ctx->authsize);
+				if (ret != 0)
+					ret = -EBADMSG;
+
+			}
+		} else {
+			ret = -ENXIO;
+		}
+
+		if (iv)
+			memcpy(ctx->iv, iv, crypto_aead_ivsize(aead));
+	}
+
+	if (ret)
+		pstat->aead_op_fail++;
+	else
+		pstat->aead_op_success++;
+
+	if (cp->platform_support.ce_shared)
+		schedule_work(&cp->unlock_ce_ws);
+	tasklet_schedule(&cp->done_tasklet);
+}
+
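+/*
+ * Encode the message length into the trailing csize bytes of the CCM
+ * B0 block, big-endian, per RFC 3610.  For example, with csize = 2 and
+ * msglen = 0x1234 the last two bytes become 0x12 0x34; a length that
+ * does not fit in csize bytes yields -EOVERFLOW.
+ */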
+static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+	__be32 data;
+
+	memset(block, 0, csize);
+	block += csize;
+
+	if (csize >= 4)
+		csize = 4;
+	else if (msglen > (1 << (8 * csize)))
+		return -EOVERFLOW;
+
+	data = cpu_to_be32(msglen);
+	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+	return 0;
+}
+
+static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq)
+{
+	struct aead_request *areq = (struct aead_request *) qreq->areq;
+	unsigned int i = ((unsigned int)qreq->iv[0]) + 1;
+
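+	/*
+	 * iv[0] carries L' = L - 1 (as in RFC 3610), so the length field
+	 * occupies i = L' + 1 trailing bytes of the 16-byte B0 block
+	 * assembled below.
+	 */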
+	memcpy(&qreq->nonce[0], qreq->iv, qreq->ivsize);
+	/*
+	 * Format control info per RFC 3610 and
+	 * NIST Special Publication 800-38C
+	 */
+	qreq->nonce[0] |= (8 * ((qreq->authsize - 2) / 2));
+	if (areq->assoclen)
+		qreq->nonce[0] |= 64;
+
+	return aead_ccm_set_msg_len(qreq->nonce + 16 - i, qreq->cryptlen, i);
+}
+
+static int qcrypto_aead_ccm_format_adata(struct qce_req *qreq, uint32_t alen,
+						struct scatterlist *sg)
+{
+	unsigned char *adata;
+	uint32_t len, l;
+
+	qreq->assoc = kzalloc((alen + 0x64), (GFP_KERNEL | __GFP_DMA));
+	if (!qreq->assoc) {
+		pr_err("qcrypto Memory allocation of adata FAIL\n");
+		return -ENOMEM;
+	}
+	adata = qreq->assoc;
+	/*
+	 * Add control info for associated data
+	 * RFC 3610 and NIST Special Publication 800-38C
+	 */
+	if (alen < 65280) {
+		*(__be16 *)adata = cpu_to_be16(alen);
+		len = 2;
+	} else if ((alen >= 65280) && (alen <= 0xffffffff)) {
+		*(__be16 *)adata = cpu_to_be16(0xfffe);
+		*(__be32 *)&adata[2] = cpu_to_be32(alen);
+		len = 6;
+	} else {
+		*(__be16 *)adata = cpu_to_be16(0xffff);
+		*(__be32 *)&adata[6] = cpu_to_be32(alen);
+		len = 10;
+	}
+	adata += len;
+	qreq->assoclen = ALIGN((alen + len), 16);
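+	/*
+	 * CCM consumes associated data in 16-byte blocks, so the 2/6/10
+	 * byte length header plus the adata is padded out to a block
+	 * boundary (the kzalloc above supplies the zero padding).
+	 */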
+	for (l = alen; l > 0; sg = sg_next(sg)) {
+		memcpy(adata, sg_virt(sg), sg->length);
+		l -= sg->length;
+		adata += sg->length;
+	}
+	return 0;
+}
+
+static void _start_qcrypto_process(struct crypto_priv *cp)
+{
+	struct crypto_async_request *async_req = NULL;
+	struct crypto_async_request *backlog = NULL;
+	unsigned long flags;
+	u32 type;
+	struct qce_req qreq;
+	int ret;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *cipher_ctx;
+	struct qcrypto_sha_ctx *sha_ctx;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+again:
+	spin_lock_irqsave(&cp->lock, flags);
+	if (cp->req == NULL) {
+		backlog = crypto_get_backlog(&cp->queue);
+		async_req = crypto_dequeue_request(&cp->queue);
+		cp->req = async_req;
+	}
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (!async_req)
+		return;
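+	/*
+	 * crypto_get_backlog() peeks at the backlog; once
+	 * crypto_dequeue_request() promotes a backlogged request into
+	 * normal processing, its owner is notified with -EINPROGRESS.
+	 */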
+	if (backlog)
+		backlog->complete(backlog, -EINPROGRESS);
+	type = crypto_tfm_alg_type(async_req->tfm);
+
+	if (type == CRYPTO_ALG_TYPE_ABLKCIPHER) {
+		struct ablkcipher_request *req;
+		struct crypto_ablkcipher *tfm;
+
+		req = container_of(async_req, struct ablkcipher_request, base);
+		cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+		rctx = ablkcipher_request_ctx(req);
+		tfm = crypto_ablkcipher_reqtfm(req);
+
+		qreq.op = QCE_REQ_ABLK_CIPHER;
+		qreq.qce_cb = _qce_ablk_cipher_complete;
+		qreq.areq = req;
+		qreq.alg = rctx->alg;
+		qreq.dir = rctx->dir;
+		qreq.mode = rctx->mode;
+		qreq.enckey = cipher_ctx->enc_key;
+		qreq.encklen = cipher_ctx->enc_key_len;
+		qreq.iv = req->info;
+		qreq.ivsize = crypto_ablkcipher_ivsize(tfm);
+		qreq.cryptlen = req->nbytes;
+		qreq.use_pmem = 0;
+
+		if ((cipher_ctx->enc_key_len == 0) &&
+				(cp->platform_support.hw_key_support == 0))
+			ret = -EINVAL;
+		else
+			ret =  qce_ablk_cipher_req(cp->qce, &qreq);
+	} else {
+		if (type == CRYPTO_ALG_TYPE_AHASH) {
+
+			struct ahash_request *req;
+			struct qce_sha_req sreq;
+
+			req = container_of(async_req,
+						struct ahash_request, base);
+			sha_ctx = crypto_tfm_ctx(async_req->tfm);
+
+			sreq.qce_cb = _qce_ahash_complete;
+			sreq.digest =  &sha_ctx->digest[0];
+			sreq.src = req->src;
+			sreq.auth_data[0] = sha_ctx->byte_count[0];
+			sreq.auth_data[1] = sha_ctx->byte_count[1];
+			sreq.auth_data[2] = sha_ctx->byte_count[2];
+			sreq.auth_data[3] = sha_ctx->byte_count[3];
+			sreq.first_blk = sha_ctx->first_blk;
+			sreq.last_blk = sha_ctx->last_blk;
+			sreq.size = req->nbytes;
+			sreq.areq = req;
+
+			switch (sha_ctx->alg) {
+			case QCE_HASH_SHA1:
+				sreq.alg = QCE_HASH_SHA1;
+				sreq.authkey = NULL;
+				break;
+			case QCE_HASH_SHA256:
+				sreq.alg = QCE_HASH_SHA256;
+				sreq.authkey = NULL;
+				break;
+			case QCE_HASH_SHA1_HMAC:
+				sreq.alg = QCE_HASH_SHA1_HMAC;
+				sreq.authkey = &sha_ctx->authkey[0];
+				break;
+			case QCE_HASH_SHA256_HMAC:
+				sreq.alg = QCE_HASH_SHA256_HMAC;
+				sreq.authkey = &sha_ctx->authkey[0];
+				break;
+			default:
+				break;
+			};
+			ret =  qce_process_sha_req(cp->qce, &sreq);
+
+		} else {
+			struct aead_request *req = container_of(async_req,
+						struct aead_request, base);
+			struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+			rctx = aead_request_ctx(req);
+			cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+
+			qreq.op = QCE_REQ_AEAD;
+			qreq.qce_cb = _qce_aead_complete;
+
+			qreq.areq = req;
+			qreq.alg = rctx->alg;
+			qreq.dir = rctx->dir;
+			qreq.mode = rctx->mode;
+			qreq.iv = rctx->iv;
+
+			qreq.enckey = cipher_ctx->enc_key;
+			qreq.encklen = cipher_ctx->enc_key_len;
+			qreq.authkey = cipher_ctx->auth_key;
+			qreq.authklen = cipher_ctx->auth_key_len;
+			qreq.authsize = crypto_aead_authsize(aead);
+			qreq.ivsize =  crypto_aead_ivsize(aead);
+			if (qreq.mode == QCE_MODE_CCM) {
+				if (qreq.dir == QCE_ENCRYPT)
+					qreq.cryptlen = req->cryptlen;
+				else
+					qreq.cryptlen = req->cryptlen -
+								qreq.authsize;
+				/* Get NONCE */
+				ret = qccrypto_set_aead_ccm_nonce(&qreq);
+				if (ret)
+					goto done;
+				/* Format Associated data    */
+				ret = qcrypto_aead_ccm_format_adata(&qreq,
+								req->assoclen,
+								req->assoc);
+				if (ret)
+					goto done;
+				/*
+				 * Save the original associated data
+				 * length and sg
+				 */
+				rctx->assoc_sg  = req->assoc;
+				rctx->assoclen  = req->assoclen;
+				rctx->assoc  = qreq.assoc;
+				/*
+				 * update req with new formatted associated
+				 * data info
+				 */
+				req->assoc = &rctx->asg;
+				req->assoclen = qreq.assoclen;
+				sg_set_buf(req->assoc, qreq.assoc,
+							req->assoclen);
+				sg_mark_end(req->assoc);
+			}
+			ret =  qce_aead_req(cp->qce, &qreq);
+		}
+	};
+done:
+	if (ret) {
+
+		spin_lock_irqsave(&cp->lock, flags);
+		cp->req = NULL;
+		spin_unlock_irqrestore(&cp->lock, flags);
+
+		if (type == CRYPTO_ALG_TYPE_ABLKCIPHER)
+			pstat->ablk_cipher_op_fail++;
+		else
+			if (type == CRYPTO_ALG_TYPE_AHASH)
+				pstat->sha_op_fail++;
+			else
+				pstat->aead_op_fail++;
+
+		async_req->complete(async_req, ret);
+		goto again;
+	};
+};
+
+static int _qcrypto_queue_req(struct crypto_priv *cp,
+				struct crypto_async_request *req)
+{
+	int ret;
+	unsigned long flags;
+
+	if (cp->platform_support.ce_shared) {
+		ret = qcrypto_lock_ce(cp);
+		if (ret)
+			return ret;
+	}
+
+	spin_lock_irqsave(&cp->lock, flags);
+	ret = crypto_enqueue_request(&cp->queue, req);
+	spin_unlock_irqrestore(&cp->lock, flags);
+	_start_qcrypto_process(cp);
+
+	return ret;
+}
+
+static int _qcrypto_enc_aes_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_ecb: %p\n", req);
+#endif
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_enc_aes_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_cbc: %p\n", req);
+#endif
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_enc_aes_ctr(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+				CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_ctr: %p\n", req);
+#endif
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CTR;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_enc_aes_xts(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_XTS;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_aead_encrypt_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
+		return  -EINVAL;
+	if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
+		(ctx->auth_key_len != AES_KEYSIZE_256))
+		return  -EINVAL;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CCM;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_aes_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_enc_des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_des_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_enc_des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_des_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_enc_3des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_3des_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_enc_3des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_3des_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_dec_aes_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+				CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_ecb: %p\n", req);
+#endif
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_dec_aes_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+				CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_cbc: %p\n", req);
+#endif
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_dec_aes_ctr(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_ctr: %p\n", req);
+#endif
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->mode = QCE_MODE_CTR;
+
+	/* CTR mode has no distinct decrypt; it reuses the encrypt keystream */
+	rctx->dir = QCE_ENCRYPT;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+};
+
+static int _qcrypto_dec_des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_des_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_dec_des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_des_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_dec_3des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_3des_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_dec_3des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_3des_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_dec_aes_xts(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->mode = QCE_MODE_XTS;
+	rctx->dir = QCE_DECRYPT;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
+		return  -EINVAL;
+	if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
+		(ctx->auth_key_len != AES_KEYSIZE_256))
+		return  -EINVAL;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CCM;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_aes_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_aead_setauthsize(struct crypto_aead *authenc,
+				unsigned int authsize)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	return 0;
+}
+
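+/*
+ * CCM only defines even authentication tag lengths between 4 and 16
+ * bytes (NIST SP 800-38C); reject anything else.
+ */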
+static int _qcrypto_aead_ccm_setauthsize(struct crypto_aead *authenc,
+				  unsigned int authsize)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+	switch (authsize) {
+	case 4:
+	case 6:
+	case 8:
+	case 10:
+	case 12:
+	case 14:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+	ctx->authsize = authsize;
+	return 0;
+}
+
+static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+			unsigned int keylen)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+	struct rtattr *rta = (struct rtattr *)key;
+	struct crypto_authenc_key_param *param;
+
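+	/*
+	 * The authenc() key blob is rtattr-framed: a
+	 * CRYPTO_AUTHENC_KEYA_PARAM attribute carrying the encryption
+	 * key length, followed by the authentication key and then the
+	 * encryption key.
+	 */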
+	if (!RTA_OK(rta, keylen))
+		goto badkey;
+	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+		goto badkey;
+	if (RTA_PAYLOAD(rta) < sizeof(*param))
+		goto badkey;
+
+	param = RTA_DATA(rta);
+	ctx->enc_key_len = be32_to_cpu(param->enckeylen);
+
+	key += RTA_ALIGN(rta->rta_len);
+	keylen -= RTA_ALIGN(rta->rta_len);
+
+	if (keylen < ctx->enc_key_len)
+		goto badkey;
+
+	ctx->auth_key_len = keylen - ctx->enc_key_len;
+	if (ctx->enc_key_len >= QCRYPTO_MAX_KEY_SIZE ||
+				ctx->auth_key_len >= QCRYPTO_MAX_KEY_SIZE)
+		goto badkey;
+	memset(ctx->auth_key, 0, QCRYPTO_MAX_KEY_SIZE);
+	memcpy(ctx->enc_key, key + ctx->auth_key_len, ctx->enc_key_len);
+	memcpy(ctx->auth_key, key, ctx->auth_key_len);
+
+	return 0;
+badkey:
+	ctx->enc_key_len = 0;
+	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+static int _qcrypto_aead_ccm_setkey(struct crypto_aead *aead, const u8 *key,
+			unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_256:
+		break;
+	case AES_KEYSIZE_192:
+		if (cp->ce_support.aes_key_192)
+			break;
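+		/* else fall through: 192-bit keys unsupported, reject */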
+	default:
+		ctx->enc_key_len = 0;
+		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	ctx->enc_key_len = keylen;
+	memcpy(ctx->enc_key, key, keylen);
+	ctx->auth_key_len = keylen;
+	memcpy(ctx->auth_key, key, keylen);
+
+	return 0;
+}
+
+static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qcrypto_aead_encrypt_aes_cbc: %p\n", req);
+#endif
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_aes_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&cp->pdev->dev, "_qcrypto_aead_decrypt_aes_cbc: %p\n", req);
+#endif
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_aes_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
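+/*
+ * givencrypt: the driver generates the IV itself.  The per-tfm salt is
+ * copied into req->giv and XORed with the 64-bit request sequence
+ * number, so consecutive packets never share an IV.
+ */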
+static int _qcrypto_aead_givencrypt_aes_cbc(struct aead_givcrypt_request *req)
+{
+	struct aead_request *areq = &req->areq;
+	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(areq);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->giv;	/* generated iv */
+
+	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
+	/* avoid consecutive packets going out with the same IV */
+	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
+	pstat->aead_sha1_aes_enc++;
+	return _qcrypto_queue_req(cp, &areq->base);
+}
+
+#ifdef QCRYPTO_AEAD_AES_CTR
+static int _qcrypto_aead_encrypt_aes_ctr(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CTR;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_aes_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_aes_ctr(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+
+	/*
+	 * CTR mode has no separate decrypt operation: decryption applies
+	 * the same keystream XOR, so the engine always runs in the
+	 * encrypt direction.
+	 */
+	rctx->dir = QCE_ENCRYPT;
+
+	rctx->mode = QCE_MODE_CTR;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_aes_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_aead_givencrypt_aes_ctr(struct aead_givcrypt_request *req)
+{
+	struct aead_request *areq = &req->areq;
+	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(areq);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CTR;
+	rctx->iv = req->giv;	/* generated iv */
+
+	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
+	/* avoid consecutive packets going out with the same IV */
+	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
+	pstat->aead_sha1_aes_enc++;
+	return _qcrypto_queue_req(cp, &areq->base);
+}
+#endif /* QCRYPTO_AEAD_AES_CTR */
+
+static int _qcrypto_aead_encrypt_des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_des_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_des_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_aead_givencrypt_des_cbc(struct aead_givcrypt_request *req)
+{
+	struct aead_request *areq = &req->areq;
+	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(areq);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->giv;	/* generated iv */
+
+	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
+	/* avoid consecutive packets going out with the same IV */
+	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
+	pstat->aead_sha1_des_enc++;
+	return _qcrypto_queue_req(cp, &areq->base);
+}
+
+static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_3des_enc++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	pstat->aead_sha1_3des_dec++;
+	return _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _qcrypto_aead_givencrypt_3des_cbc(struct aead_givcrypt_request *req)
+{
+	struct aead_request *areq = &req->areq;
+	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	rctx = aead_request_ctx(areq);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->giv;	/* generated iv */
+
+	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
+	/* avoid consecutive packets going out with the same IV */
+	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
+	pstat->aead_sha1_3des_enc++;
+	return _qcrypto_queue_req(cp, &areq->base);
+}
+
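+/* Count the scatterlist entries needed to cover nbytes of data. */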
+static int qcrypto_count_sg(struct scatterlist *sg, int nbytes)
+{
+	int i;
+
+	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
+		nbytes -= sg->length;
+
+	return i;
+}
+
+static int _sha_init(struct qcrypto_sha_ctx *ctx)
+{
+	ctx->first_blk = 1;
+	ctx->last_blk = 0;
+	ctx->byte_count[0] = 0;
+	ctx->byte_count[1] = 0;
+	ctx->byte_count[2] = 0;
+	ctx->byte_count[3] = 0;
+	ctx->trailing_buf_len = 0;
+
+	return 0;
+}
+
+static int _sha1_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	_sha_init(sha_ctx);
+	sha_ctx->alg = QCE_HASH_SHA1;
+
+	memset(&sha_ctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
+	memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
+						SHA1_DIGEST_SIZE);
+	sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	_update_sha1_ctx(req);
+
+	pstat->sha1_digest++;
+	return 0;
+}
+
+static int _sha256_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+
+	_sha_init(sha_ctx);
+	sha_ctx->alg = QCE_HASH_SHA256;
+
+	memset(&sha_ctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
+	memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+	sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	_update_sha256_ctx(req);
+
+	pstat->sha256_digest++;
+	return 0;
+}
+
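+/*
+ * export/import serialize the partial hash state (byte counter,
+ * intermediate state words and buffered residue) in the generic
+ * sha1_state/sha256_state layout, so an in-progress hash can be
+ * saved and later resumed.
+ */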
+static int _sha1_export(struct ahash_request  *req, void *out)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
+	struct sha1_state *out_ctx = (struct sha1_state *)out;
+
+	out_ctx->count = sha_state_ctx->count;
+	memcpy(out_ctx->state, sha_state_ctx->state, sizeof(out_ctx->state));
+	memcpy(out_ctx->buffer, sha_state_ctx->buffer, SHA1_BLOCK_SIZE);
+
+	return 0;
+}
+
+static int _sha1_import(struct ahash_request  *req, const void *in)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
+	struct sha1_state *in_ctx = (struct sha1_state *)in;
+
+	sha_state_ctx->count = in_ctx->count;
+	memcpy(sha_state_ctx->state, in_ctx->state, sizeof(in_ctx->state));
+	memcpy(sha_state_ctx->buffer, in_ctx->buffer, SHA1_BLOCK_SIZE);
+	memcpy(sha_ctx->trailing_buf, in_ctx->buffer, SHA1_BLOCK_SIZE);
+
+	sha_ctx->byte_count[0] =  (uint32_t)(in_ctx->count & 0xFFFFFFC0);
+	sha_ctx->byte_count[1] =  (uint32_t)(in_ctx->count >> 32);
+	_words_to_byte_stream(in_ctx->state, sha_ctx->digest, sha_ctx->diglen);
+
+	sha_ctx->trailing_buf_len = (uint32_t)(in_ctx->count &
+						(SHA1_BLOCK_SIZE-1));
+
+	if (!(in_ctx->count))
+		sha_ctx->first_blk = 1;
+	else
+		sha_ctx->first_blk = 0;
+
+	return 0;
+}
+
+static int _sha256_export(struct ahash_request  *req, void *out)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
+	struct sha256_state *out_ctx = (struct sha256_state *)out;
+
+	out_ctx->count = sha_state_ctx->count;
+	memcpy(out_ctx->state, sha_state_ctx->state, sizeof(out_ctx->state));
+	memcpy(out_ctx->buf, sha_state_ctx->buf, SHA256_BLOCK_SIZE);
+
+	return 0;
+}
+
+static int _sha256_import(struct ahash_request  *req, const void *in)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
+	struct sha256_state *in_ctx = (struct sha256_state *)in;
+
+	sha_state_ctx->count = in_ctx->count;
+	memcpy(sha_state_ctx->state, in_ctx->state, sizeof(in_ctx->state));
+	memcpy(sha_state_ctx->buf, in_ctx->buf, SHA256_BLOCK_SIZE);
+	memcpy(sha_ctx->trailing_buf, in_ctx->buf, SHA256_BLOCK_SIZE);
+
+	sha_ctx->byte_count[0] =  (uint32_t)(in_ctx->count & 0xFFFFFFC0);
+	sha_ctx->byte_count[1] =  (uint32_t)(in_ctx->count >> 32);
+	_words_to_byte_stream(in_ctx->state, sha_ctx->digest, sha_ctx->diglen);
+
+	sha_ctx->trailing_buf_len = (uint32_t)(in_ctx->count &
+						(SHA256_BLOCK_SIZE-1));
+
+	if (!(in_ctx->count))
+		sha_ctx->first_blk = 1;
+	else
+		sha_ctx->first_blk = 0;
+
+	return 0;
+}
+
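+/*
+ * Common update path.  The crypto engine consumes whole blocks only,
+ * so the request is split: complete blocks (with any residue buffered
+ * from earlier updates prepended) are queued to the hardware, while
+ * the tail (at least one byte, up to one full block) is kept in
+ * trailing_buf so that final() always has data to pad.
+ */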
+static int _sha_update(struct ahash_request  *req, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	uint32_t total, len, i, num_sg;
+	uint8_t *k_src = NULL;
+	uint32_t sha_pad_len = 0;
+	uint32_t end_src = 0;
+	uint32_t trailing_buf_len = 0;
+	uint32_t nbytes, index = 0;
+	uint32_t saved_length = 0;
+	int ret = 0;
+
+	/* check for trailing buffer from previous updates and append it */
+	total = req->nbytes + sha_ctx->trailing_buf_len;
+	len = req->nbytes;
+
+	if (total <= sha_block_size) {
+		i = 0;
+
+		k_src = &sha_ctx->trailing_buf[sha_ctx->trailing_buf_len];
+		while (len > 0) {
+			memcpy(k_src, sg_virt(&req->src[i]),
+							req->src[i].length);
+			len -= req->src[i].length;
+			k_src += req->src[i].length;
+			i++;
+		}
+		sha_ctx->trailing_buf_len = total;
+		if (sha_ctx->alg == QCE_HASH_SHA1)
+			_update_sha1_ctx(req);
+		if (sha_ctx->alg == QCE_HASH_SHA256)
+			_update_sha256_ctx(req);
+		return 0;
+	}
+
+	/* save the original req structure fields*/
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	memcpy(sha_ctx->tmp_tbuf, sha_ctx->trailing_buf,
+					sha_ctx->trailing_buf_len);
+	k_src = &sha_ctx->trailing_buf[0];
+	/*  get new trailing buffer */
+	sha_pad_len = ALIGN(total, sha_block_size) - total;
+	trailing_buf_len =  sha_block_size - sha_pad_len;
+	nbytes = total - trailing_buf_len;
+	num_sg = qcrypto_count_sg(req->src, req->nbytes);
+
+	len = sha_ctx->trailing_buf_len;
+	i = 0;
+
+	while (len < nbytes) {
+		if ((len + req->src[i].length) > nbytes)
+			break;
+		len += req->src[i].length;
+		i++;
+	}
+
+	end_src = i;
+	if (len < nbytes) {
+		uint32_t remnant = (nbytes - len);
+		memcpy(k_src, (sg_virt(&req->src[i]) + remnant),
+				(req->src[i].length - remnant));
+		k_src += (req->src[i].length - remnant);
+		saved_length = req->src[i].length;
+		index = i;
+		req->src[i].length = remnant;
+		i++;
+	}
+
+	while (i < num_sg) {
+		memcpy(k_src, sg_virt(&req->src[i]), req->src[i].length);
+		k_src += req->src[i].length;
+		i++;
+	}
+
+	if (sha_ctx->trailing_buf_len) {
+		num_sg = end_src + 2;
+		sha_ctx->sg = kzalloc(num_sg * (sizeof(struct scatterlist)),
+								GFP_KERNEL);
+		if (sha_ctx->sg == NULL) {
+			pr_err("qcrypto Can't Allocate mem: sha_ctx->sg\n");
+			return -ENOMEM;
+		}
+
+		sg_set_buf(&sha_ctx->sg[0], sha_ctx->tmp_tbuf,
+						sha_ctx->trailing_buf_len);
+		for (i = 1; i < num_sg; i++)
+			sg_set_buf(&sha_ctx->sg[i], sg_virt(&req->src[i-1]),
+							req->src[i-1].length);
+
+		req->src = sha_ctx->sg;
+		sg_mark_end(&sha_ctx->sg[num_sg - 1]);
+	} else
+		sg_mark_end(&req->src[end_src]);
+
+	req->nbytes = nbytes;
+	if (saved_length > 0)
+		rctx->src[index].length = saved_length;
+	sha_ctx->trailing_buf_len = trailing_buf_len;
+
+	ret =  _qcrypto_queue_req(cp, &req->base);
+
+	return ret;
+}
+
+static int _sha1_update(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
+
+	sha_state_ctx->count += req->nbytes;
+	return _sha_update(req, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_update(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
+
+	sha_state_ctx->count += req->nbytes;
+	return _sha_update(req, SHA256_BLOCK_SIZE);
+}
+
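+/*
+ * Common final path: queue the residue held in trailing_buf with
+ * last_blk set so the engine applies the SHA padding and returns the
+ * digest.
+ */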
+static int _sha_final(struct ahash_request *req, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	int ret = 0;
+
+	sha_ctx->last_blk = 1;
+
+	/* save the original req structure fields*/
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->trailing_buf,
+					sha_ctx->trailing_buf_len);
+	sg_mark_end(&sha_ctx->tmp_sg);
+
+	req->src = &sha_ctx->tmp_sg;
+	req->nbytes = sha_ctx->trailing_buf_len;
+
+	ret =  _qcrypto_queue_req(cp, &req->base);
+
+	return ret;
+}
+
+static int _sha1_final(struct ahash_request  *req)
+{
+	return _sha_final(req, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_final(struct ahash_request  *req)
+{
+	return _sha_final(req, SHA256_BLOCK_SIZE);
+}
+
+static int _sha_digest(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	/* save the original req structure fields*/
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	sha_ctx->last_blk = 1;
+	ret =  _qcrypto_queue_req(cp, &req->base);
+
+	return ret;
+}
+
+static int _sha1_digest(struct ahash_request *req)
+{
+	_sha1_init(req);
+	return _sha_digest(req);
+}
+
+static int _sha256_digest(struct ahash_request *req)
+{
+	_sha256_init(req);
+	return _sha_digest(req);
+}
+
+static void _crypto_sha_hmac_ahash_req_complete(
+	struct crypto_async_request *req, int err)
+{
+	struct completion *ahash_req_complete = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+	complete(ahash_req_complete);
+}
+
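+/*
+ * Per the HMAC definition, a key longer than the block size is first
+ * hashed down to the digest size.  The digest request goes through the
+ * normal async queue, so wait for its completion before returning.
+ */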
+static int _sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+		unsigned int len)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+	int ret = 0;
+
+	sha_ctx->in_buf = kzalloc(len, GFP_KERNEL);
+	if (sha_ctx->in_buf == NULL) {
+		pr_err("qcrypto Can't Allocate mem: sha_ctx->in_buf\n");
+		return -ENOMEM;
+	}
+	memcpy(sha_ctx->in_buf, key, len);
+	sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->in_buf, len);
+	sg_mark_end(&sha_ctx->tmp_sg);
+
+	ahash_request_set_crypt(sha_ctx->ahash_req, &sha_ctx->tmp_sg,
+				&sha_ctx->authkey[0], len);
+
+	ret = _sha_digest(sha_ctx->ahash_req);
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret =
+			wait_for_completion_interruptible(
+						&sha_ctx->ahash_req_complete);
+		INIT_COMPLETION(sha_ctx->ahash_req_complete);
+	}
+
+	sha_ctx->authkey_in_len = len;
+	kfree(sha_ctx->in_buf);
+	sha_ctx->in_buf = NULL;
+
+	return ret;
+}
+
+static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+							unsigned int len)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+
+	if (len <= SHA1_BLOCK_SIZE)
+		memcpy(&sha_ctx->authkey[0], key, len);
+	else {
+		_sha_init(sha_ctx);
+		sha_ctx->alg = QCE_HASH_SHA1;
+		memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
+						SHA1_DIGEST_SIZE);
+		sha_ctx->diglen = SHA1_DIGEST_SIZE;
+		_sha_hmac_setkey(tfm, key, len);
+	}
+	return 0;
+}
+
+static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+							unsigned int len)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+
+	if (len <= SHA256_BLOCK_SIZE)
+		memcpy(&sha_ctx->authkey[0], key, len);
+	else {
+		_sha_init(sha_ctx);
+		sha_ctx->alg = QCE_HASH_SHA256;
+		memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+		sha_ctx->diglen = SHA256_DIGEST_SIZE;
+		_sha_hmac_setkey(tfm, key, len);
+	}
+
+	return 0;
+}
+
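+/*
+ * Software HMAC fallback, inner hash setup: XOR the key with the ipad
+ * constant (0x36) and buffer it as the first block of the inner hash.
+ * Used when the engine has no native HMAC support.
+ */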
+static int _sha_hmac_init_ihash(struct ahash_request *req,
+						uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	int i;
+
+	for (i = 0; i < sha_block_size; i++)
+		sha_ctx->trailing_buf[i] = sha_ctx->authkey[i] ^ 0x36;
+	sha_ctx->trailing_buf_len = sha_block_size;
+
+	return 0;
+}
+
+static int _sha1_hmac_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+	int ret = 0;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+	pstat->sha1_hmac_digest++;
+
+	_sha_init(sha_ctx);
+	memset(&sha_ctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
+	memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
+						SHA1_DIGEST_SIZE);
+	sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	_update_sha1_ctx(req);
+
+	if (cp->ce_support.sha_hmac)
+		sha_ctx->alg = QCE_HASH_SHA1_HMAC;
+	else {
+		sha_ctx->alg = QCE_HASH_SHA1;
+		ret = _sha_hmac_init_ihash(req, SHA1_BLOCK_SIZE);
+	}
+
+	return ret;
+}
+
+static int _sha256_hmac_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+	int ret = 0;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+	pstat->sha256_hmac_digest++;
+
+	_sha_init(sha_ctx);
+	memset(&sha_ctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
+	memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+	sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	_update_sha256_ctx(req);
+
+	if (cp->ce_support.sha_hmac)
+		sha_ctx->alg = QCE_HASH_SHA256_HMAC;
+	else {
+		sha_ctx->alg = QCE_HASH_SHA256;
+		ret = _sha_hmac_init_ihash(req, SHA256_BLOCK_SIZE);
+	}
+
+	return ret;
+}
+
+static int _sha1_hmac_update(struct ahash_request *req)
+{
+	return _sha1_update(req);
+}
+
+static int _sha256_hmac_update(struct ahash_request *req)
+{
+	return _sha256_update(req);
+}
+
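+/*
+ * Software HMAC fallback, outer hash: compute
+ * H((K ^ opad) || inner_digest), with opad the repeated 0x5c byte,
+ * completing the HMAC construction of RFC 2104.
+ */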
+static int _sha_hmac_outer_hash(struct ahash_request *req,
+		uint32_t sha_digest_size, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int i;
+
+	for (i = 0; i < sha_block_size; i++)
+		sha_ctx->tmp_tbuf[i] = sha_ctx->authkey[i] ^ 0x5c;
+
+	/* save the original req structure fields*/
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	memcpy(&sha_ctx->tmp_tbuf[sha_block_size], &sha_ctx->digest[0],
+						 sha_digest_size);
+
+	sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->tmp_tbuf, sha_block_size +
+							sha_digest_size);
+	sg_mark_end(&sha_ctx->tmp_sg);
+	req->src = &sha_ctx->tmp_sg;
+	req->nbytes = sha_block_size + sha_digest_size;
+
+	_sha_init(sha_ctx);
+	if (sha_ctx->alg == QCE_HASH_SHA1) {
+		memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
+							SHA1_DIGEST_SIZE);
+		sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	} else {
+		memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
+							SHA256_DIGEST_SIZE);
+		sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	}
+
+	sha_ctx->last_blk = 1;
+	return  _qcrypto_queue_req(cp, &req->base);
+}
+
+static int _sha_hmac_inner_hash(struct ahash_request *req,
+			uint32_t sha_digest_size, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct ahash_request *areq = sha_ctx->ahash_req;
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	sha_ctx->last_blk = 1;
+
+	sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->trailing_buf,
+					sha_ctx->trailing_buf_len);
+	sg_mark_end(&sha_ctx->tmp_sg);
+
+	ahash_request_set_crypt(areq, &sha_ctx->tmp_sg, &sha_ctx->digest[0],
+						sha_ctx->trailing_buf_len);
+	sha_ctx->last_blk = 1;
+	ret =  _qcrypto_queue_req(cp, &areq->base);
+
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret =
+		wait_for_completion_interruptible(&sha_ctx->ahash_req_complete);
+		INIT_COMPLETION(sha_ctx->ahash_req_complete);
+	}
+
+	return ret;
+}
+
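+/*
+ * With native HMAC support the engine finishes the MAC in one final
+ * pass; otherwise run the software inner hash, then the outer hash.
+ */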
+static int _sha1_hmac_final(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	if (cp->ce_support.sha_hmac)
+		return _sha_final(req, SHA1_BLOCK_SIZE);
+	else {
+		ret = _sha_hmac_inner_hash(req, SHA1_DIGEST_SIZE,
+							SHA1_BLOCK_SIZE);
+		if (ret)
+			return ret;
+		return _sha_hmac_outer_hash(req, SHA1_DIGEST_SIZE,
+							SHA1_BLOCK_SIZE);
+	}
+}
+
+static int _sha256_hmac_final(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	if (cp->ce_support.sha_hmac)
+		return _sha_final(req, SHA256_BLOCK_SIZE);
+	else {
+		ret = _sha_hmac_inner_hash(req, SHA256_DIGEST_SIZE,
+							SHA256_BLOCK_SIZE);
+		if (ret)
+			return ret;
+		return _sha_hmac_outer_hash(req, SHA256_DIGEST_SIZE,
+							SHA256_BLOCK_SIZE);
+	}
+}
+
+static int _sha1_hmac_digest(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+	pstat->sha1_hmac_digest++;
+
+	_sha_init(sha_ctx);
+	memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
+							SHA1_DIGEST_SIZE);
+	sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	sha_ctx->alg = QCE_HASH_SHA1_HMAC;
+
+	return _sha_digest(req);
+}
+
+static int _sha256_hmac_digest(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat[cp->pdev->id];
+	pstat->sha256_hmac_digest++;
+
+	_sha_init(sha_ctx);
+	memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+	sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	sha_ctx->alg = QCE_HASH_SHA256_HMAC;
+
+	return _sha_digest(req);
+}
+
+static struct ahash_alg _qcrypto_ahash_algos[] = {
+	{
+		.init		=	_sha1_init,
+		.update		=	_sha1_update,
+		.final		=	_sha1_final,
+		.export		=	_sha1_export,
+		.import		=	_sha1_import,
+		.digest		=	_sha1_digest,
+		.halg		= {
+			.digestsize	= SHA1_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha1_state),
+			.base	= {
+				.cra_name	 = "sha1",
+				.cra_driver_name = "qcrypto-sha1",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							 CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA1_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+	{
+		.init		=	_sha256_init,
+		.update		=	_sha256_update,
+		.final		=	_sha256_final,
+		.export		=	_sha256_export,
+		.import		=	_sha256_import,
+		.digest		=	_sha256_digest,
+		.halg		= {
+			.digestsize	= SHA256_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha256_state),
+			.base		= {
+				.cra_name	 = "sha256",
+				.cra_driver_name = "qcrypto-sha256",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA256_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+};
+
+static struct ahash_alg _qcrypto_sha_hmac_algos[] = {
+	{
+		.init		=	_sha1_hmac_init,
+		.update		=	_sha1_hmac_update,
+		.final		=	_sha1_hmac_final,
+		.export		=	_sha1_export,
+		.import		=	_sha1_import,
+		.digest		=	_sha1_hmac_digest,
+		.setkey		=	_sha1_hmac_setkey,
+		.halg		= {
+			.digestsize	= SHA1_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha1_state),
+			.base	= {
+				.cra_name	 = "hmac(sha1)",
+				.cra_driver_name = "qcrypto-hmac-sha1",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							 CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA1_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_hmac_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+	{
+		.init		=	_sha256_hmac_init,
+		.update		=	_sha256_hmac_update,
+		.final		=	_sha256_hmac_final,
+		.export		=	_sha256_export,
+		.import		=	_sha256_import,
+		.digest		=	_sha256_hmac_digest,
+		.setkey		=	_sha256_hmac_setkey,
+		.halg		= {
+			.digestsize	= SHA256_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha256_state),
+			.base		= {
+				.cra_name	 = "hmac(sha256)",
+				.cra_driver_name = "qcrypto-hmac-sha256",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA256_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_hmac_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+};
+
+static struct crypto_alg _qcrypto_ablk_cipher_algos[] = {
+	{
+		.cra_name		= "ecb(aes)",
+		.cra_driver_name	= "qcrypto-ecb-aes",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_u		= {
+			.ablkcipher = {
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_aes,
+				.encrypt	= _qcrypto_enc_aes_ecb,
+				.decrypt	= _qcrypto_dec_aes_ecb,
+			},
+		},
+	},
+	{
+		.cra_name	= "cbc(aes)",
+		.cra_driver_name = "qcrypto-cbc-aes",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= AES_BLOCK_SIZE,
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_aes,
+				.encrypt	= _qcrypto_enc_aes_cbc,
+				.decrypt	= _qcrypto_dec_aes_cbc,
+			},
+		},
+	},
+	{
+		.cra_name	= "ctr(aes)",
+		.cra_driver_name = "qcrypto-ctr-aes",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= AES_BLOCK_SIZE,
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_aes,
+				.encrypt	= _qcrypto_enc_aes_ctr,
+				.decrypt	= _qcrypto_dec_aes_ctr,
+			},
+		},
+	},
+	{
+		.cra_name		= "ecb(des)",
+		.cra_driver_name	= "qcrypto-ecb-des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_u		= {
+			.ablkcipher = {
+				.min_keysize	= DES_KEY_SIZE,
+				.max_keysize	= DES_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_des,
+				.encrypt	= _qcrypto_enc_des_ecb,
+				.decrypt	= _qcrypto_dec_des_ecb,
+			},
+		},
+	},
+	{
+		.cra_name	= "cbc(des)",
+		.cra_driver_name = "qcrypto-cbc-des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= DES_BLOCK_SIZE,
+				.min_keysize	= DES_KEY_SIZE,
+				.max_keysize	= DES_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_des,
+				.encrypt	= _qcrypto_enc_des_cbc,
+				.decrypt	= _qcrypto_dec_des_cbc,
+			},
+		},
+	},
+	{
+		.cra_name		= "ecb(des3_ede)",
+		.cra_driver_name	= "qcrypto-ecb-3des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_u		= {
+			.ablkcipher = {
+				.min_keysize	= DES3_EDE_KEY_SIZE,
+				.max_keysize	= DES3_EDE_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_3des,
+				.encrypt	= _qcrypto_enc_3des_ecb,
+				.decrypt	= _qcrypto_dec_3des_ecb,
+			},
+		},
+	},
+	{
+		.cra_name	= "cbc(des3_ede)",
+		.cra_driver_name = "qcrypto-cbc-3des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= DES3_EDE_BLOCK_SIZE,
+				.min_keysize	= DES3_EDE_KEY_SIZE,
+				.max_keysize	= DES3_EDE_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_3des,
+				.encrypt	= _qcrypto_enc_3des_cbc,
+				.decrypt	= _qcrypto_dec_3des_cbc,
+			},
+		},
+	},
+};
+
+static struct crypto_alg _qcrypto_ablk_cipher_xts_algo = {
+	.cra_name	= "xts(aes)",
+	.cra_driver_name = "qcrypto-xts-aes",
+	.cra_priority	= 300,
+	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize	= AES_BLOCK_SIZE,
+	.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+	.cra_alignmask	= 0,
+	.cra_type	= &crypto_ablkcipher_type,
+	.cra_module	= THIS_MODULE,
+	.cra_init	= _qcrypto_cra_ablkcipher_init,
+	.cra_u		= {
+		.ablkcipher = {
+			.ivsize		= AES_BLOCK_SIZE,
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= _qcrypto_setkey_aes,
+			.encrypt	= _qcrypto_enc_aes_xts,
+			.decrypt	= _qcrypto_dec_aes_xts,
+		},
+	},
+};
+
+static struct crypto_alg _qcrypto_aead_sha1_hmac_algos[] = {
+	{
+		.cra_name	= "authenc(hmac(sha1),cbc(aes))",
+		.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-aes",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize  = AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_aead_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_aead_init,
+		.cra_u		= {
+			.aead = {
+				.ivsize         = AES_BLOCK_SIZE,
+				.maxauthsize    = SHA1_DIGEST_SIZE,
+				.setkey = _qcrypto_aead_setkey,
+				.setauthsize = _qcrypto_aead_setauthsize,
+				.encrypt = _qcrypto_aead_encrypt_aes_cbc,
+				.decrypt = _qcrypto_aead_decrypt_aes_cbc,
+				.givencrypt = _qcrypto_aead_givencrypt_aes_cbc,
+				.geniv = "<built-in>",
+			}
+		}
+	},
+
+#ifdef QCRYPTO_AEAD_AES_CTR
+	{
+		.cra_name	= "authenc(hmac(sha1),ctr(aes))",
+		.cra_driver_name = "qcrypto-aead-hmac-sha1-ctr-aes",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize  = AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_aead_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_aead_init,
+		.cra_u		= {
+			.aead = {
+				.ivsize         = AES_BLOCK_SIZE,
+				.maxauthsize    = SHA1_DIGEST_SIZE,
+				.setkey = _qcrypto_aead_setkey,
+				.setauthsize = _qcrypto_aead_setauthsize,
+				.encrypt = _qcrypto_aead_encrypt_aes_ctr,
+				.decrypt = _qcrypto_aead_decrypt_aes_ctr,
+				.givencrypt = _qcrypto_aead_givencrypt_aes_ctr,
+				.geniv = "<built-in>",
+			}
+		}
+	},
+#endif /* QCRYPTO_AEAD_AES_CTR */
+	{
+		.cra_name	= "authenc(hmac(sha1),cbc(des))",
+		.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize  = DES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_aead_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_aead_init,
+		.cra_u		= {
+			.aead = {
+				.ivsize         = DES_BLOCK_SIZE,
+				.maxauthsize    = SHA1_DIGEST_SIZE,
+				.setkey = _qcrypto_aead_setkey,
+				.setauthsize = _qcrypto_aead_setauthsize,
+				.encrypt = _qcrypto_aead_encrypt_des_cbc,
+				.decrypt = _qcrypto_aead_decrypt_des_cbc,
+				.givencrypt = _qcrypto_aead_givencrypt_des_cbc,
+				.geniv = "<built-in>",
+			}
+		}
+	},
+	{
+		.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
+		.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-3des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize  = DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_aead_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_aead_init,
+		.cra_u		= {
+			.aead = {
+				.ivsize         = DES3_EDE_BLOCK_SIZE,
+				.maxauthsize    = SHA1_DIGEST_SIZE,
+				.setkey = _qcrypto_aead_setkey,
+				.setauthsize = _qcrypto_aead_setauthsize,
+				.encrypt = _qcrypto_aead_encrypt_3des_cbc,
+				.decrypt = _qcrypto_aead_decrypt_3des_cbc,
+				.givencrypt = _qcrypto_aead_givencrypt_3des_cbc,
+				.geniv = "<built-in>",
+			}
+		}
+	},
+};
+
+static struct crypto_alg _qcrypto_aead_ccm_algo = {
+	.cra_name	= "ccm(aes)",
+	.cra_driver_name = "qcrypto-aes-ccm",
+	.cra_priority	= 300,
+	.cra_flags	= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+	.cra_blocksize  = AES_BLOCK_SIZE,
+	.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+	.cra_alignmask	= 0,
+	.cra_type	= &crypto_aead_type,
+	.cra_module	= THIS_MODULE,
+	.cra_init	= _qcrypto_cra_aead_init,
+	.cra_u		= {
+		.aead = {
+			.ivsize         = AES_BLOCK_SIZE,
+			.maxauthsize    = SHA1_DIGEST_SIZE,
+			.setkey = _qcrypto_aead_ccm_setkey,
+			.setauthsize = _qcrypto_aead_ccm_setauthsize,
+			.encrypt = _qcrypto_aead_encrypt_aes_ccm,
+			.decrypt = _qcrypto_aead_decrypt_aes_ccm,
+			.geniv = "<built-in>",
+		}
+	}
+};
+
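+/*
+ * Probe: open a handle to the crypto engine, query which algorithms
+ * and key sizes the hardware supports, and register only those with
+ * the kernel crypto API.
+ */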
+static int  _qcrypto_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	void *handle;
+	struct crypto_priv *cp;
+	int i;
+	struct msm_ce_hw_support *platform_support;
+
+	if (pdev->id >= MAX_CRYPTO_DEVICE) {
+		printk(KERN_ERR "%s: device id %d exceeds allowed %d\n",
+				__func__, pdev->id, MAX_CRYPTO_DEVICE);
+		return -ENOENT;
+	}
+
+	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+	if (!cp) {
+		pr_err("qcrypto Memory allocation of crypto_priv FAIL\n");
+		return -ENOMEM;
+	}
+
+	/* open qce */
+	handle = qce_open(pdev, &rc);
+	if (handle == NULL) {
+		kfree(cp);
+		platform_set_drvdata(pdev, NULL);
+		return rc;
+	}
+
+	INIT_LIST_HEAD(&cp->alg_list);
+	platform_set_drvdata(pdev, cp);
+	spin_lock_init(&cp->lock);
+	tasklet_init(&cp->done_tasklet, req_done, (unsigned long)cp);
+	crypto_init_queue(&cp->queue, 50);
+	cp->qce = handle;
+	cp->pdev = pdev;
+	qce_hw_support(cp->qce, &cp->ce_support);
+	platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data;
+	cp->platform_support.ce_shared = platform_support->ce_shared;
+	cp->platform_support.shared_ce_resource =
+				platform_support->shared_ce_resource;
+	cp->platform_support.hw_key_support =
+				platform_support->hw_key_support;
+	cp->ce_lock_count = 0;
+	cp->platform_support.sha_hmac = platform_support->sha_hmac;
+
+	if (cp->platform_support.ce_shared)
+		INIT_WORK(&cp->unlock_ce_ws, qcrypto_unlock_ce);
+
+	/* register crypto cipher algorithms the device supports */
+	for (i = 0; i < ARRAY_SIZE(_qcrypto_ablk_cipher_algos); i++) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_cipher_alg_alloc(cp,
+					&_qcrypto_ablk_cipher_algos[i]);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		rc = crypto_register_alg(&q_alg->cipher_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->cipher_alg.cra_driver_name);
+			kfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.cra_driver_name);
+		}
+	}
+
+	/* register crypto cipher algorithms the device supports */
+	if (cp->ce_support.aes_xts) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_cipher_alg_alloc(cp,
+					&_qcrypto_ablk_cipher_xts_algo);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		rc = crypto_register_alg(&q_alg->cipher_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->cipher_alg.cra_driver_name);
+			kfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.cra_driver_name);
+		}
+	}
+
+	/*
+	 * Register crypto hash (sha1 and sha256) algorithms the
+	 * device supports
+	 */
+	for (i = 0; i < ARRAY_SIZE(_qcrypto_ahash_algos); i++) {
+		struct qcrypto_alg *q_alg = NULL;
+
+		q_alg = _qcrypto_sha_alg_alloc(cp, &_qcrypto_ahash_algos[i]);
+
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+
+		rc = crypto_register_ahash(&q_alg->sha_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+			kfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+		}
+	}
+
+	/* register crypto aead (hmac-sha1) algorithms the device supports */
+	if (cp->ce_support.sha1_hmac_20 || cp->ce_support.sha1_hmac) {
+		for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha1_hmac_algos);
+									i++) {
+			struct qcrypto_alg *q_alg;
+
+			q_alg = _qcrypto_cipher_alg_alloc(cp,
+					&_qcrypto_aead_sha1_hmac_algos[i]);
+			if (IS_ERR(q_alg)) {
+				rc = PTR_ERR(q_alg);
+				goto err;
+			}
+
+			rc = crypto_register_alg(&q_alg->cipher_alg);
+			if (rc) {
+				dev_err(&pdev->dev,
+					"%s alg registration failed\n",
+					q_alg->cipher_alg.cra_driver_name);
+				kfree(q_alg);
+			} else {
+				list_add_tail(&q_alg->entry, &cp->alg_list);
+				dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.cra_driver_name);
+			}
+		}
+	}
+
+	if ((cp->ce_support.sha_hmac) || (cp->platform_support.sha_hmac)) {
+		/* register crypto hmac algorithms the device supports */
+		for (i = 0; i < ARRAY_SIZE(_qcrypto_sha_hmac_algos); i++) {
+			struct qcrypto_alg *q_alg = NULL;
+
+			q_alg = _qcrypto_sha_alg_alloc(cp,
+						&_qcrypto_sha_hmac_algos[i]);
+
+			if (IS_ERR(q_alg)) {
+				rc = PTR_ERR(q_alg);
+				goto err;
+			}
+
+			rc = crypto_register_ahash(&q_alg->sha_alg);
+			if (rc) {
+				dev_err(&pdev->dev,
+				"%s alg registration failed\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+				kfree(q_alg);
+			} else {
+				list_add_tail(&q_alg->entry, &cp->alg_list);
+				dev_info(&pdev->dev, "%s\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+			}
+		}
+	}
+	/*
+	 * Register crypto cipher (aes-ccm) algorithms the
+	 * device supports
+	 */
+	if (cp->ce_support.aes_ccm) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_cipher_alg_alloc(cp, &_qcrypto_aead_ccm_algo);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		rc = crypto_register_alg(&q_alg->cipher_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->cipher_alg.cra_driver_name);
+			kfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.cra_driver_name);
+		}
+	}
+
+	return 0;
+err:
+	_qcrypto_remove(pdev);
+	return rc;
+}
+
+static struct platform_driver _qualcomm_crypto = {
+	.probe          = _qcrypto_probe,
+	.remove         = _qcrypto_remove,
+	.driver         = {
+		.owner  = THIS_MODULE,
+		.name   = "qcrypto",
+	},
+};
+
+static int _debug_qcrypto[MAX_CRYPTO_DEVICE];
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int rc = -EINVAL;
+	int qcrypto = *((int *) file->private_data);
+	int len;
+
+	len = _disp_stats(qcrypto);
+
+	rc = simple_read_from_buffer((void __user *) buf, count,
+			ppos, (void *) _debug_read_buf, len);
+
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+
+	int qcrypto = *((int *) file->private_data);
+
+	memset((char *)&_qcrypto_stat[qcrypto], 0, sizeof(struct crypto_stat));
+	return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+	.open =         _debug_stats_open,
+	.read =         _debug_stats_read,
+	.write =        _debug_stats_write,
+};
+
+static int _qcrypto_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	int i;
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcrypto", NULL);
+	if (IS_ERR(_debug_dent)) {
+		pr_err("qcrypto debugfs_create_dir fail, error %ld\n",
+				PTR_ERR(_debug_dent));
+		return PTR_ERR(_debug_dent);
+	}
+
+	for (i = 0; i < MAX_CRYPTO_DEVICE; i++) {
+		snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", i+1);
+		_debug_qcrypto[i] = i;
+		dent = debugfs_create_file(name, 0644, _debug_dent,
+				&_debug_qcrypto[i], &_debug_stats_ops);
+		if (dent == NULL) {
+			pr_err("qcrypto debugfs_create_file fail\n");
+			rc = -ENOMEM;
+			goto err;
+		}
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int __init _qcrypto_init(void)
+{
+	int rc;
+
+	rc = _qcrypto_debug_init();
+	if (rc)
+		return rc;
+
+	return platform_driver_register(&_qualcomm_crypto);
+}
+
+static void __exit _qcrypto_exit(void)
+{
+	printk(KERN_INFO "%s Unregister QCRYPTO\n", __func__);
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&_qualcomm_crypto);
+}
+
+module_init(_qcrypto_init);
+module_exit(_qcrypto_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm Crypto driver");
+MODULE_VERSION("1.18");