/* Qualcomm CE device driver.
 *
 * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/mman.h>
#include <linux/android_pmem.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/hash.h>
#include <linux/platform_data/qcom_crypto_device.h>
#include <mach/scm.h>
#include <mach/msm_bus.h>
#include <linux/qcedev.h>
#include "qce.h"


#define CACHE_LINE_SIZE 32
#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE

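/* standard initialization vector for SHA-1, source: FIPS 180-2 */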
static uint8_t _std_init_vector_sha1_uint8[] = {
	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
	0xC3, 0xD2, 0xE1, 0xF0
};
/* standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha256_uint8[] = {
	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
};

enum qcedev_crypto_oper_type {
	QCEDEV_CRYPTO_OPER_CIPHER = 0,
	QCEDEV_CRYPTO_OPER_SHA = 1,
	QCEDEV_CRYPTO_OPER_LAST
};

struct qcedev_handle;

struct qcedev_cipher_req {
	struct ablkcipher_request creq;
	void *cookie;
};

struct qcedev_sha_req {
	struct ahash_request sreq;
	void *cookie;
};

struct qcedev_sha_ctxt {
	uint32_t auth_data[4];
	uint8_t digest[QCEDEV_MAX_SHA_DIGEST];
	uint32_t diglen;
	uint8_t trailing_buf[64];
	uint32_t trailing_buf_len;
	uint8_t first_blk;
	uint8_t last_blk;
	uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
};

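/*
 * One outstanding request on a qcedev control device.  The op-specific
 * request data and the qce request live in unions selected by op_type,
 * and the submitter blocks on 'complete', which is signalled from the
 * done tasklet once the QCE callback has run.
 */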
struct qcedev_async_req {
	struct list_head list;
	struct completion complete;
	enum qcedev_crypto_oper_type op_type;
	union {
		struct qcedev_cipher_op_req cipher_op_req;
		struct qcedev_sha_op_req sha_op_req;
	};
	union {
		struct qcedev_cipher_req cipher_req;
		struct qcedev_sha_req sha_req;
	};
	struct qcedev_handle *handle;
	int err;
};

static DEFINE_MUTEX(send_cmd_lock);
static DEFINE_MUTEX(sent_bw_req);
/**********************************************************************
 * Register ourselves as a misc device so that the CE driver can be
 * accessed from userspace.
 */

#define QCEDEV_DEV	"qcedev"

struct qcedev_control {

	/* CE features supported by platform */
	struct msm_ce_hw_support platform_support;

	uint32_t ce_lock_count;
	uint32_t high_bw_req_count;

	/* CE features/algorithms supported by HW engine */
	struct ce_hw_support ce_support;

	uint32_t bus_scale_handle;

	/* misc device */
	struct miscdevice miscdevice;

	/* qce handle */
	void *qce;

	/* platform device */
	struct platform_device *pdev;

	unsigned magic;

	struct list_head ready_commands;
	struct qcedev_async_req *active_command;
	spinlock_t lock;
	struct tasklet_struct done_tasklet;
};

struct qcedev_handle {
	/* qcedev control handle */
	struct qcedev_control *cntl;
	/* qce internal sha context */
	struct qcedev_sha_ctxt sha_ctxt;
};

/*-------------------------------------------------------------------------
 * Resource Locking Service
 * ------------------------------------------------------------------------*/
#define QCEDEV_CMD_ID		1
#define QCEDEV_CE_LOCK_CMD	1
#define QCEDEV_CE_UNLOCK_CMD	0
#define NUM_RETRY		1000
#define CE_BUSY			55

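/*
 * Issue a lock/unlock command for the shared crypto engine to the secure
 * environment via an SCM call.  When CONFIG_MSM_SCM is not set there is
 * nothing to arbitrate and the call is a no-op that reports success.
 */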
static int qcedev_scm_cmd(int resource, int cmd, int *response)
{
#ifdef CONFIG_MSM_SCM

	struct {
		int resource;
		int cmd;
	} cmd_buf;

	cmd_buf.resource = resource;
	cmd_buf.cmd = cmd;

	return scm_call(SCM_SVC_TZ, QCEDEV_CMD_ID, &cmd_buf,
		sizeof(cmd_buf), response, sizeof(*response));

#else
	return 0;
#endif
}

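/*
 * Vote for high or low bus bandwidth on behalf of an open handle.  The
 * vote is refcounted under sent_bw_req: only the first high-bandwidth
 * request and the release of the last one actually update the bus-scale
 * client.
 */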
static void qcedev_ce_high_bw_req(struct qcedev_control *podev,
							bool high_bw_req)
{
	int ret = 0;

	mutex_lock(&sent_bw_req);
	if (high_bw_req) {
		if (podev->high_bw_req_count == 0)
			ret = msm_bus_scale_client_update_request(
					podev->bus_scale_handle, 1);
		if (ret)
			pr_err("%s Unable to set to high bandwidth\n",
						__func__);
		podev->high_bw_req_count++;
	} else {
		if (podev->high_bw_req_count == 1)
			ret = msm_bus_scale_client_update_request(
					podev->bus_scale_handle, 0);
		if (ret)
			pr_err("%s Unable to set to low bandwidth\n",
						__func__);
		podev->high_bw_req_count--;
	}
	mutex_unlock(&sent_bw_req);
}

static int qcedev_unlock_ce(struct qcedev_control *podev)
{
	int ret = 0;

	mutex_lock(&send_cmd_lock);
	if (podev->ce_lock_count == 1) {
		int response = 0;

		if (qcedev_scm_cmd(podev->platform_support.shared_ce_resource,
					QCEDEV_CE_UNLOCK_CMD, &response)) {
			pr_err("Failed to release CE lock\n");
			ret = -EIO;
		}
	}
	if (ret == 0) {
		if (podev->ce_lock_count)
			podev->ce_lock_count--;
		else {
			/* We should never be here */
			ret = -EIO;
			pr_err("CE hardware is already unlocked\n");
		}
	}
	mutex_unlock(&send_cmd_lock);

	return ret;
}

static int qcedev_lock_ce(struct qcedev_control *podev)
{
	int ret = 0;

	mutex_lock(&send_cmd_lock);
	if (podev->ce_lock_count == 0) {
		int response = -CE_BUSY;
		int i = 0;

		do {
			if (qcedev_scm_cmd(
				podev->platform_support.shared_ce_resource,
				QCEDEV_CE_LOCK_CMD, &response)) {
				response = -EINVAL;
				break;
			}
		} while ((response == -CE_BUSY) && (i++ < NUM_RETRY));

		if ((response == -CE_BUSY) && (i >= NUM_RETRY)) {
			ret = -EUSERS;
		} else {
			if (response < 0)
				ret = -EINVAL;
		}
	}
	if (ret == 0)
		podev->ce_lock_count++;
	mutex_unlock(&send_cmd_lock);
	return ret;
}

#define QCEDEV_MAGIC 0x56434544 /* "qced" */

static long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
static int qcedev_open(struct inode *inode, struct file *file);
static int qcedev_release(struct inode *inode, struct file *file);
static int start_cipher_req(struct qcedev_control *podev);
static int start_sha_req(struct qcedev_control *podev);

static const struct file_operations qcedev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qcedev_ioctl,
	.open = qcedev_open,
	.release = qcedev_release,
};

static struct qcedev_control qce_dev[] = {
	{
		.miscdevice = {
			.minor = MISC_DYNAMIC_MINOR,
			.name = "qce",
			.fops = &qcedev_fops,
		},
		.magic = QCEDEV_MAGIC,
	},
};

#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
#define DEBUG_MAX_FNAME  16
#define DEBUG_MAX_RW_BUF 1024

struct qcedev_stat {
	u32 qcedev_dec_success;
	u32 qcedev_dec_fail;
	u32 qcedev_enc_success;
	u32 qcedev_enc_fail;
	u32 qcedev_sha_success;
	u32 qcedev_sha_fail;
};

static struct qcedev_stat _qcedev_stat[MAX_QCE_DEVICE];
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static int _debug_qcedev[MAX_QCE_DEVICE];

static struct qcedev_control *qcedev_minor_to_control(unsigned n)
{
	int i;

	for (i = 0; i < MAX_QCE_DEVICE; i++) {
		if (qce_dev[i].miscdevice.minor == n)
			return &qce_dev[i];
	}
	return NULL;
}

static int qcedev_open(struct inode *inode, struct file *file)
{
	struct qcedev_handle *handle;
	struct qcedev_control *podev;

	podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
	if (podev == NULL) {
		pr_err("%s: no such device %d\n", __func__,
					MINOR(inode->i_rdev));
		return -ENOENT;
	}

	handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
	if (handle == NULL) {
		pr_err("%s: failed to allocate qcedev handle\n", __func__);
		return -ENOMEM;
	}

	handle->cntl = podev;
	file->private_data = handle;
	if (podev->platform_support.bus_scale_table != NULL)
		qcedev_ce_high_bw_req(podev, true);
	return 0;
}

static int qcedev_release(struct inode *inode, struct file *file)
{
	struct qcedev_control *podev;
	struct qcedev_handle *handle;

	handle = file->private_data;
	podev = handle->cntl;
	if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
		pr_err("%s: invalid handle %p\n",
					__func__, podev);
	}
	kzfree(handle);
	file->private_data = NULL;
	if (podev != NULL && podev->platform_support.bus_scale_table != NULL)
		qcedev_ce_high_bw_req(podev, false);
	return 0;
}

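/*
 * Tasklet handler scheduled from the qce callbacks.  It completes the
 * request that just finished and immediately starts the next one on the
 * ready_commands list; a request that fails to start is completed with
 * an error and the loop tries the one after it.
 */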
static void req_done(unsigned long data)
{
	struct qcedev_control *podev = (struct qcedev_control *)data;
	struct qcedev_async_req *areq;
	unsigned long flags = 0;
	struct qcedev_async_req *new_req = NULL;
	int ret = 0;

	spin_lock_irqsave(&podev->lock, flags);
	areq = podev->active_command;
	podev->active_command = NULL;

again:
	if (!list_empty(&podev->ready_commands)) {
		new_req = container_of(podev->ready_commands.next,
						struct qcedev_async_req, list);
		list_del(&new_req->list);
		podev->active_command = new_req;
		new_req->err = 0;
		if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev);
		else
			ret = start_sha_req(podev);
	}

	spin_unlock_irqrestore(&podev->lock, flags);

	if (areq)
		complete(&areq->complete);

	if (new_req && ret) {
		complete(&new_req->complete);
		spin_lock_irqsave(&podev->lock, flags);
		podev->active_command = NULL;
		areq = NULL;
		ret = 0;
		new_req = NULL;
		goto again;
	}

	return;
}

static void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
	unsigned char *authdata, int ret)
{
	struct qcedev_sha_req *areq;
	struct qcedev_control *pdev;
	struct qcedev_handle *handle;

	uint32_t *auth32 = (uint32_t *)authdata;

	areq = (struct qcedev_sha_req *) cookie;
	handle = (struct qcedev_handle *) areq->cookie;
	pdev = handle->cntl;

	if (digest)
		memcpy(&handle->sha_ctxt.digest[0], digest, 32);

	if (authdata) {
		handle->sha_ctxt.auth_data[0] = auth32[0];
		handle->sha_ctxt.auth_data[1] = auth32[1];
		handle->sha_ctxt.auth_data[2] = auth32[2];
		handle->sha_ctxt.auth_data[3] = auth32[3];
	}

	tasklet_schedule(&pdev->done_tasklet);
}


static void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
	unsigned char *iv, int ret)
{
	struct qcedev_cipher_req *areq;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;

	areq = (struct qcedev_cipher_req *) cookie;
	handle = (struct qcedev_handle *) areq->cookie;
	podev = handle->cntl;
	qcedev_areq = podev->active_command;

	if (iv)
		memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
					qcedev_areq->cipher_op_req.ivlen);
	tasklet_schedule(&podev->done_tasklet);
}

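/*
 * Translate the userspace cipher request attached to active_command into
 * a qce_req and hand it to the QCE driver.  AES-CTR is always programmed
 * as an encrypt operation since encryption and decryption are identical
 * in CTR mode; a zero-length key is only accepted for the NO_KEY
 * operations or when the platform supports a hardware-backed key.
 */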
static int start_cipher_req(struct qcedev_control *podev)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_req creq;
	int ret = 0;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;

	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
	creq.use_pmem = qcedev_areq->cipher_op_req.use_pmem;
	if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM)
		creq.pmem = &qcedev_areq->cipher_op_req.pmem;
	else
		creq.pmem = NULL;

	switch (qcedev_areq->cipher_op_req.alg) {
	case QCEDEV_ALG_DES:
		creq.alg = CIPHER_ALG_DES;
		break;
	case QCEDEV_ALG_3DES:
		creq.alg = CIPHER_ALG_3DES;
		break;
	case QCEDEV_ALG_AES:
		creq.alg = CIPHER_ALG_AES;
		break;
	default:
		return -EINVAL;
	}

	switch (qcedev_areq->cipher_op_req.mode) {
	case QCEDEV_AES_MODE_CBC:
	case QCEDEV_DES_MODE_CBC:
		creq.mode = QCE_MODE_CBC;
		break;
	case QCEDEV_AES_MODE_ECB:
	case QCEDEV_DES_MODE_ECB:
		creq.mode = QCE_MODE_ECB;
		break;
	case QCEDEV_AES_MODE_CTR:
		creq.mode = QCE_MODE_CTR;
		break;
	case QCEDEV_AES_MODE_XTS:
		creq.mode = QCE_MODE_XTS;
		break;
	default:
		return -EINVAL;
	}

	if ((creq.alg == CIPHER_ALG_AES) &&
		(creq.mode == QCE_MODE_CTR)) {
		creq.dir = QCE_ENCRYPT;
	} else {
		if (QCEDEV_OPER_ENC == qcedev_areq->cipher_op_req.op)
			creq.dir = QCE_ENCRYPT;
		else
			creq.dir = QCE_DECRYPT;
	}

	creq.iv = &qcedev_areq->cipher_op_req.iv[0];
	creq.ivsize = qcedev_areq->cipher_op_req.ivlen;

	creq.enckey = &qcedev_areq->cipher_op_req.enckey[0];
	creq.encklen = qcedev_areq->cipher_op_req.encklen;

	creq.cryptlen = qcedev_areq->cipher_op_req.data_len;

	if (qcedev_areq->cipher_op_req.encklen == 0) {
		if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
			|| (qcedev_areq->cipher_op_req.op ==
				QCEDEV_OPER_DEC_NO_KEY))
			creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
		else {
			int i;

			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
				if (qcedev_areq->cipher_op_req.enckey[i] != 0)
					break;
			}

			if ((podev->platform_support.hw_key_support == 1) &&
						(i == QCEDEV_MAX_KEY_SIZE))
				creq.op = QCE_REQ_ABLK_CIPHER;
			else {
				ret = -EINVAL;
				goto unsupported;
			}
		}
	} else {
		creq.op = QCE_REQ_ABLK_CIPHER;
	}

	creq.qce_cb = qcedev_cipher_req_cb;
	creq.areq = (void *)&qcedev_areq->cipher_req;

	ret = qce_ablk_cipher_req(podev->qce, &creq);
unsupported:
	if (ret)
		qcedev_areq->err = -ENXIO;
	else
		qcedev_areq->err = 0;
	return ret;
}

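/*
 * Translate the userspace hash request attached to active_command into a
 * qce_sha_req.  The intermediate digest, byte counts and block flags come
 * from the per-handle sha_ctxt; when the engine has no native HMAC
 * support, HMAC requests are issued as plain hashes and the ipad/opad
 * handling is done by the qcedev_hmac_* helpers.
 */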
static int start_sha_req(struct qcedev_control *podev)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_sha_req sreq;
	int ret = 0;
	struct qcedev_handle *handle;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	handle = qcedev_areq->handle;

	switch (qcedev_areq->sha_op_req.alg) {
	case QCEDEV_ALG_SHA1:
		sreq.alg = QCE_HASH_SHA1;
		break;
	case QCEDEV_ALG_SHA256:
		sreq.alg = QCE_HASH_SHA256;
		break;
	case QCEDEV_ALG_SHA1_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA1_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];
			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;

		} else {
			sreq.alg = QCE_HASH_SHA1;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_SHA256_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA256_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];
			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
		} else {
			sreq.alg = QCE_HASH_SHA256;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_AES_CMAC:
		sreq.alg = QCE_HASH_AES_CMAC;
		sreq.authkey = &handle->sha_ctxt.authkey[0];
		sreq.authklen = qcedev_areq->sha_op_req.authklen;
		break;
	default:
		break;
	}

	qcedev_areq->sha_req.cookie = handle;

	sreq.qce_cb = qcedev_sha_req_cb;
	if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
		sreq.auth_data[0] = handle->sha_ctxt.auth_data[0];
		sreq.auth_data[1] = handle->sha_ctxt.auth_data[1];
		sreq.auth_data[2] = handle->sha_ctxt.auth_data[2];
		sreq.auth_data[3] = handle->sha_ctxt.auth_data[3];
		sreq.digest = &handle->sha_ctxt.digest[0];
		sreq.first_blk = handle->sha_ctxt.first_blk;
		sreq.last_blk = handle->sha_ctxt.last_blk;
	}
	sreq.size = qcedev_areq->sha_req.sreq.nbytes;
	sreq.src = qcedev_areq->sha_req.sreq.src;
	sreq.areq = (void *)&qcedev_areq->sha_req;

	ret = qce_process_sha_req(podev->qce, &sreq);

	if (ret)
		qcedev_areq->err = -ENXIO;
	else
		qcedev_areq->err = 0;
	return ret;
}

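/*
 * Queue a request on the control device and block until it completes.
 * The shared-CE lock is taken around the operation when the platform
 * shares the engine with the secure side, and the per-device debugfs
 * statistics are updated from the request's final error code.
 */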
static int submit_req(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle)
{
	struct qcedev_control *podev;
	unsigned long flags = 0;
	int ret = 0;
	struct qcedev_stat *pstat;

	qcedev_areq->err = 0;
	podev = handle->cntl;

	if (podev->platform_support.ce_shared) {
		ret = qcedev_lock_ce(podev);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&podev->lock, flags);

	if (podev->active_command == NULL) {
		podev->active_command = qcedev_areq;
		if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev);
		else
			ret = start_sha_req(podev);
	} else {
		list_add_tail(&qcedev_areq->list, &podev->ready_commands);
	}

	if (ret != 0)
		podev->active_command = NULL;

	spin_unlock_irqrestore(&podev->lock, flags);

	if (ret == 0)
		wait_for_completion(&qcedev_areq->complete);

	if (podev->platform_support.ce_shared)
		ret = qcedev_unlock_ce(podev);

	if (ret)
		qcedev_areq->err = -EIO;

	pstat = &_qcedev_stat[podev->pdev->id];
	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
		switch (qcedev_areq->cipher_op_req.op) {
		case QCEDEV_OPER_DEC:
			if (qcedev_areq->err)
				pstat->qcedev_dec_fail++;
			else
				pstat->qcedev_dec_success++;
			break;
		case QCEDEV_OPER_ENC:
			if (qcedev_areq->err)
				pstat->qcedev_enc_fail++;
			else
				pstat->qcedev_enc_success++;
			break;
		default:
			break;
		}
	} else {
		if (qcedev_areq->err)
			pstat->qcedev_sha_fail++;
		else
			pstat->qcedev_sha_success++;
	}

	return qcedev_areq->err;
}

static int qcedev_sha_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt;

	memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
	sha_ctxt->first_blk = 1;

	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
		memcpy(&sha_ctxt->digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		sha_ctxt->diglen = SHA1_DIGEST_SIZE;
	} else {
		if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
			memcpy(&sha_ctxt->digest[0],
				&_std_init_vector_sha256_uint8[0],
				SHA256_DIGEST_SIZE);
			sha_ctxt->diglen = SHA256_DIGEST_SIZE;
		}
	}
	return 0;
}


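/*
 * Hash at most QCE_MAX_OPER_DATA bytes.  Input that does not fill a hash
 * block is only accumulated in the context's trailing buffer; larger
 * input is gathered from the user buffers into a cache-line-aligned
 * bounce buffer, whole blocks are handed to the engine, and the
 * remainder becomes the new trailing buffer.
 */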
static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	int i = 0;
	struct scatterlist sg_src[2];
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	uint32_t sha_pad_len = 0;
	uint32_t trailing_buf_len = 0;
	uint32_t t_buf = handle->sha_ctxt.trailing_buf_len;
	uint32_t sha_block_size;

	total = qcedev_areq->sha_op_req.data_len + t_buf;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	if (total <= sha_block_size) {
		uint32_t len = qcedev_areq->sha_op_req.data_len;

		i = 0;

		k_src = &handle->sha_ctxt.trailing_buf[t_buf];

		/* Copy data from user src(s) */
		while (len > 0) {
			user_src =
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
			if (user_src && __copy_from_user(k_src,
					(void __user *)user_src,
					qcedev_areq->sha_op_req.data[i].len))
				return -EFAULT;

			len -= qcedev_areq->sha_op_req.data[i].len;
			k_src += qcedev_areq->sha_op_req.data[i].len;
			i++;
		}
		handle->sha_ctxt.trailing_buf_len = total;

		return 0;
	}


	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL) {
		pr_err("%s: Can't Allocate memory: k_buf_src 0x%x\n",
			__func__, (uint32_t)k_buf_src);
		return -ENOMEM;
	}

	k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
							CACHE_LINE_SIZE);
	k_src = k_align_src;

	/* check for trailing buffer from previous updates and append it */
	if (t_buf > 0) {
		memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
								t_buf);
		k_src += t_buf;
	}

	/* Copy data from user src(s) */
	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
	if (user_src && __copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[0].len)) {
		kfree(k_buf_src);
		return -EFAULT;
	}
	k_src += qcedev_areq->sha_op_req.data[0].len;
	for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src = (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && __copy_from_user(k_src,
					(void __user *)user_src,
					qcedev_areq->sha_op_req.data[i].len)) {
			kfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	/* get new trailing buffer */
	sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
	trailing_buf_len = CE_SHA_BLOCK_SIZE - sha_pad_len;

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src[0];
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src,
						total-trailing_buf_len);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;

	/* update sha_ctxt trailing buf content to new trailing buf */
	if (trailing_buf_len > 0) {
		memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
		memcpy(&handle->sha_ctxt.trailing_buf[0],
			(k_src - trailing_buf_len),
			trailing_buf_len);
	}
	handle->sha_ctxt.trailing_buf_len = trailing_buf_len;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;

	kfree(k_buf_src);
	return err;
}

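/*
 * Update the hash with an arbitrarily large request by walking the user
 * buffers and feeding qcedev_sha_update_max_xfer() at most
 * QCE_MAX_OPER_DATA (32 KB) per pass, then restoring the caller's
 * request descriptor.
 */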
static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	int num_entries = 0;
	uint32_t total = 0;

	/* verify address src(s) */
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++)
		if (!access_ok(VERIFY_READ,
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr,
			qcedev_areq->sha_op_req.data[i].len))
			return -EFAULT;

	if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {

		struct qcedev_sha_op_req *saved_req;
		struct qcedev_sha_op_req req;
		struct qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;

		/* save the original req structure */
		saved_req =
			kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL);
		if (saved_req == NULL) {
			pr_err("%s:Can't Allocate mem:saved_req 0x%x\n",
						__func__, (uint32_t)saved_req);
			return -ENOMEM;
		}
		memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req));
		memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
				sreq->data[0].len = QCE_MAX_OPER_DATA;
				if (i > 0) {
					sreq->data[0].vaddr =
							sreq->data[i].vaddr;
				}

				sreq->data_len = QCE_MAX_OPER_DATA;
				sreq->entries = 1;

				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle);

				sreq->data[i].len = req.data[i].len -
							QCE_MAX_OPER_DATA;
				sreq->data[i].vaddr = req.data[i].vaddr +
							QCE_MAX_OPER_DATA;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;
			} else {
				total = 0;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + sreq->data[j].len) >=
							QCE_MAX_OPER_DATA) {
						sreq->data[j].len =
						(QCE_MAX_OPER_DATA - total);
						total = QCE_MAX_OPER_DATA;
						break;
					}
					total += sreq->data[j].len;
				}

				sreq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						sreq->data[k].len =
							sreq->data[i+k].len;
						sreq->data[k].vaddr =
							sreq->data[i+k].vaddr;
					}
				sreq->entries = num_entries;

				i = j;
				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle);
				num_entries = 0;

				sreq->data[i].vaddr = req.data[i].vaddr +
							sreq->data[i].len;
				sreq->data[i].len = req.data[i].len -
							sreq->data[i].len;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;

				if (sreq->data[i].len == 0)
					i++;
			}
		} /* end of while ((i < req.entries) && (err == 0)) */

		/* Restore the original req structure */
		for (i = 0; i < saved_req->entries; i++) {
			sreq->data[i].len = saved_req->data[i].len;
			sreq->data[i].vaddr = saved_req->data[i].vaddr;
		}
		sreq->entries = saved_req->entries;
		sreq->data_len = saved_req->data_len;
		kfree(saved_req);
	} else
		err = qcedev_sha_update_max_xfer(qcedev_areq, handle);

	return err;
}

static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint32_t total;

	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	handle->sha_ctxt.last_blk = 1;

	total = handle->sha_ctxt.trailing_buf_len;

	if (total) {
		k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
					GFP_KERNEL);
		if (k_buf_src == NULL) {
			pr_err("%s: Can't Allocate memory: k_buf_src 0x%x\n",
				__func__, (uint32_t)k_buf_src);
			return -ENOMEM;
		}

		k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
							CACHE_LINE_SIZE);
		memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);
	}
	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, total);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.first_blk = 0;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;
	handle->sha_ctxt.trailing_buf_len = 0;
	memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);

	kfree(k_buf_src);
	return err;
}

static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle)
{
	int err = 0;
	int i = 0;
	struct scatterlist sg_src[2];
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;

	total = qcedev_areq->sha_op_req.data_len;

	/* verify address src(s) */
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++)
		if (!access_ok(VERIFY_READ,
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr,
			qcedev_areq->sha_op_req.data[i].len))
			return -EFAULT;

	/* Verify Source Address */
	if (!access_ok(VERIFY_READ,
				(void __user *)qcedev_areq->sha_op_req.authkey,
				qcedev_areq->sha_op_req.authklen))
		return -EFAULT;
	if (__copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)qcedev_areq->sha_op_req.authkey,
				qcedev_areq->sha_op_req.authklen))
		return -EFAULT;


	k_buf_src = kmalloc(total, GFP_KERNEL);
	if (k_buf_src == NULL) {
		pr_err("%s: Can't Allocate memory: k_buf_src 0x%x\n",
				__func__, (uint32_t)k_buf_src);
		return -ENOMEM;
	}

	k_src = k_buf_src;

	/* Copy data from user src(s) */
	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src =
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && __copy_from_user(k_src, (void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len)) {
			kfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src[0];
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_buf_src, total);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total;
	handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
	err = submit_req(qcedev_areq, handle);

	kfree(k_buf_src);
	return err;
}

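/*
 * Install the HMAC authentication key for this handle.  Keys up to
 * QCEDEV_MAX_KEY_SIZE are copied in directly; longer keys are first
 * hashed with the underlying algorithm and the digest is used as the
 * key, as HMAC requires.
 */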
static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
					struct qcedev_handle *handle)
{
	int err = 0;

	if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
		qcedev_sha_init(areq, handle);
		/* Verify Source Address */
		if (!access_ok(VERIFY_READ,
				(void __user *)areq->sha_op_req.authkey,
				areq->sha_op_req.authklen))
			return -EFAULT;
		if (__copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)areq->sha_op_req.authkey,
				areq->sha_op_req.authklen))
			return -EFAULT;
	} else {
		struct qcedev_async_req authkey_areq;
		uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];

		init_completion(&authkey_areq.complete);

		authkey_areq.sha_op_req.entries = 1;
		authkey_areq.sha_op_req.data[0].vaddr =
						areq->sha_op_req.authkey;
		authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.diglen = 0;
		authkey_areq.handle = handle;

		memset(&authkey_areq.sha_op_req.digest[0], 0,
						QCEDEV_MAX_SHA_DIGEST);
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;

		authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;

		qcedev_sha_init(&authkey_areq, handle);
		err = qcedev_sha_update(&authkey_areq, handle);
		if (!err)
			err = qcedev_sha_final(&authkey_areq, handle);
		else
			return err;
		memcpy(&authkey[0], &handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		qcedev_sha_init(areq, handle);

		memcpy(&handle->sha_ctxt.authkey[0], &authkey[0],
				handle->sha_ctxt.diglen);
	}
	return err;
}

static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint8_t *k_src = NULL;
	uint32_t sha_block_size = 0;
	uint32_t sha_digest_size = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		sha_digest_size = SHA1_DIGEST_SIZE;
		sha_block_size = SHA1_BLOCK_SIZE;
	} else {
		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
			sha_digest_size = SHA256_DIGEST_SIZE;
			sha_block_size = SHA256_BLOCK_SIZE;
		}
	}
	k_src = kmalloc(sha_block_size, GFP_KERNEL);
	if (k_src == NULL) {
		pr_err("%s: Can't Allocate memory: k_src 0x%x\n",
			__func__, (uint32_t)k_src);
		return -ENOMEM;
	}

	/* check for trailing buffer from previous updates and append it */
	memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
			handle->sha_ctxt.trailing_buf_len);

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = sha_block_size;
	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0],
						sha_digest_size);
	handle->sha_ctxt.trailing_buf_len = sha_digest_size;

	handle->sha_ctxt.first_blk = 1;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE;
	}

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE;
	}
	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;

	kfree(k_src);
	return err;
}

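/*
 * Software HMAC fallback: preload the trailing buffer with the key XORed
 * with the inner (0x36) or outer (0x5c) pad so the next hash pass
 * absorbs it as the first block.
 */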
static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
				struct qcedev_handle *handle, bool ikey)
{
	int i;
	uint32_t constant;
	uint32_t sha_block_size;

	if (ikey)
		constant = 0x36;
	else
		constant = 0x5c;

	if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	for (i = 0; i < sha_block_size; i++)
		handle->sha_ctxt.trailing_buf[i] =
				(handle->sha_ctxt.authkey[i] ^ constant);

	handle->sha_ctxt.trailing_buf_len = sha_block_size;
	return 0;
}

static int qcedev_hmac_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	err = qcedev_set_hmac_auth_key(areq, handle);
	if (err)
		return err;
	if (!podev->ce_support.sha_hmac)
		qcedev_hmac_update_iokey(areq, handle, true);
	return 0;
}

static int qcedev_hmac_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	err = qcedev_sha_final(areq, handle);
	if (podev->ce_support.sha_hmac)
		return err;

	qcedev_hmac_update_iokey(areq, handle, false);
	err = qcedev_hmac_get_ohash(areq, handle);
	if (err)
		return err;
	err = qcedev_sha_final(areq, handle);

	return err;
}

static int qcedev_hash_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_init(areq, handle);
	else
		return qcedev_hmac_init(areq, handle);
}

static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	return qcedev_sha_update(qcedev_areq, handle);
}

static int qcedev_hash_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_final(areq, handle);
	else
		return qcedev_hmac_final(areq, handle);
}

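/*
 * PMEM-backed cipher path: source and destination are described by pmem
 * fds plus offsets, mapped with get_pmem_file() and fed to the engine as
 * scatterlists.  When pmem is not configured the operation is rejected
 * with -EPERM.
 */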
#ifdef CONFIG_ANDROID_PMEM
static int qcedev_pmem_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
						struct qcedev_handle *handle)
{
	int i = 0;
	int err = 0;
	struct scatterlist *sg_src = NULL;
	struct scatterlist *sg_dst = NULL;
	struct scatterlist *sg_ndex = NULL;
	struct file *file_src = NULL;
	struct file *file_dst = NULL;
	unsigned long paddr;
	unsigned long kvaddr;
	unsigned long len;

	sg_src = kmalloc((sizeof(struct scatterlist) *
				areq->cipher_op_req.entries), GFP_KERNEL);
	if (sg_src == NULL) {
		pr_err("%s: Can't Allocate memory:sg_src 0x%x\n",
			__func__, (uint32_t)sg_src);
		return -ENOMEM;

	}
	memset(sg_src, 0, (sizeof(struct scatterlist) *
				areq->cipher_op_req.entries));
	sg_ndex = sg_src;
	areq->cipher_req.creq.src = sg_src;

	/* address src */
	get_pmem_file(areq->cipher_op_req.pmem.fd_src, &paddr,
					&kvaddr, &len, &file_src);

	for (i = 0; i < areq->cipher_op_req.entries; i++) {
		sg_set_buf(sg_ndex,
		((uint8_t *)(areq->cipher_op_req.pmem.src[i].offset) + kvaddr),
		areq->cipher_op_req.pmem.src[i].len);
		sg_ndex++;
	}
	sg_mark_end(--sg_ndex);

	for (i = 0; i < areq->cipher_op_req.entries; i++)
		areq->cipher_op_req.pmem.src[i].offset += (uint32_t)paddr;

	/* address dst */
	/* If not in-place encryption/decryption */
	if (areq->cipher_op_req.in_place_op != 1) {
		sg_dst = kmalloc((sizeof(struct scatterlist) *
				areq->cipher_op_req.entries), GFP_KERNEL);
		if (sg_dst == NULL) {
			pr_err("%s: Can't Allocate memory: sg_dst 0x%x\n",
				__func__, (uint32_t)sg_dst);
			/* release the source mapping taken above */
			if (file_src)
				put_pmem_file(file_src);
			kfree(sg_src);
			return -ENOMEM;
		}
		memset(sg_dst, 0, (sizeof(struct scatterlist) *
					areq->cipher_op_req.entries));
		areq->cipher_req.creq.dst = sg_dst;
		sg_ndex = sg_dst;

		get_pmem_file(areq->cipher_op_req.pmem.fd_dst, &paddr,
					&kvaddr, &len, &file_dst);
		for (i = 0; i < areq->cipher_op_req.entries; i++)
			sg_set_buf(sg_ndex++,
			((uint8_t *)(areq->cipher_op_req.pmem.dst[i].offset)
			+ kvaddr), areq->cipher_op_req.pmem.dst[i].len);
		sg_mark_end(--sg_ndex);

		for (i = 0; i < areq->cipher_op_req.entries; i++)
			areq->cipher_op_req.pmem.dst[i].offset +=
							(uint32_t)paddr;
	} else {
		areq->cipher_req.creq.dst = sg_src;
		for (i = 0; i < areq->cipher_op_req.entries; i++) {
			areq->cipher_op_req.pmem.dst[i].offset =
				areq->cipher_op_req.pmem.src[i].offset;
			areq->cipher_op_req.pmem.dst[i].len =
				areq->cipher_op_req.pmem.src[i].len;
		}
	}

	areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
	areq->cipher_req.creq.info = areq->cipher_op_req.iv;

	err = submit_req(areq, handle);

	kfree(sg_src);
	kfree(sg_dst);

	if (file_dst)
		put_pmem_file(file_dst);
	if (file_src)
		put_pmem_file(file_src);

	return err;
}


static int qcedev_pmem_ablk_cipher(struct qcedev_async_req *qcedev_areq,
						struct qcedev_handle *handle)
{
	int err = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	int num_entries = 0;
	uint32_t total = 0;
	struct qcedev_cipher_op_req *saved_req;
	struct qcedev_cipher_op_req *creq = &qcedev_areq->cipher_op_req;

	saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
	if (saved_req == NULL) {
		pr_err("%s:Can't Allocate mem:saved_req 0x%x\n",
			__func__, (uint32_t)saved_req);
		return -ENOMEM;
	}
	memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));

	if (qcedev_areq->cipher_op_req.data_len > QCE_MAX_OPER_DATA) {

		struct qcedev_cipher_op_req req;

		/* save the original req structure */
		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (creq->pmem.src[i].len > QCE_MAX_OPER_DATA) {
				creq->pmem.src[0].len = QCE_MAX_OPER_DATA;
				if (i > 0) {
					creq->pmem.src[0].offset =
						creq->pmem.src[i].offset;
				}

				creq->data_len = QCE_MAX_OPER_DATA;
				creq->entries = 1;

				err =
				qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq,
								handle);

				creq->pmem.src[i].len = req.pmem.src[i].len -
							QCE_MAX_OPER_DATA;
				creq->pmem.src[i].offset =
						req.pmem.src[i].offset +
							QCE_MAX_OPER_DATA;
				req.pmem.src[i].offset =
						creq->pmem.src[i].offset;
				req.pmem.src[i].len = creq->pmem.src[i].len;
			} else {
				total = 0;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + creq->pmem.src[j].len)
							>= QCE_MAX_OPER_DATA) {
						creq->pmem.src[j].len =
						QCE_MAX_OPER_DATA - total;
						total = QCE_MAX_OPER_DATA;
						break;
					}
					total += creq->pmem.src[j].len;
				}

				creq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						creq->pmem.src[k].len =
						creq->pmem.src[i+k].len;
						creq->pmem.src[k].offset =
						creq->pmem.src[i+k].offset;
					}
				creq->entries = num_entries;

				i = j;
				err =
				qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq,
								handle);
				num_entries = 0;

				creq->pmem.src[i].offset =
						req.pmem.src[i].offset +
						creq->pmem.src[i].len;
				creq->pmem.src[i].len =
						req.pmem.src[i].len -
						creq->pmem.src[i].len;
				req.pmem.src[i].offset =
						creq->pmem.src[i].offset;
				req.pmem.src[i].len =
						creq->pmem.src[i].len;

				if (creq->pmem.src[i].len == 0)
					i++;
			}

		} /* end of while ((i < req.entries) && (err == 0)) */

	} else
		err = qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq, handle);

	/* Restore the original req structure */
	for (i = 0; i < saved_req->entries; i++) {
		creq->pmem.src[i].len = saved_req->pmem.src[i].len;
		creq->pmem.src[i].offset = saved_req->pmem.src[i].offset;
	}
	creq->entries = saved_req->entries;
	creq->data_len = saved_req->data_len;
	kfree(saved_req);

	return err;

}
#else
static int qcedev_pmem_ablk_cipher(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	return -EPERM;
}
#endif /* CONFIG_ANDROID_PMEM */

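/*
 * Virtual-address cipher path for one transfer of at most
 * QCE_MAX_OPER_DATA bytes: the user source buffers are copied into the
 * caller-supplied aligned bounce buffer, ciphered in place, and the
 * result is copied back out to the destination buffers.  For CTR mode a
 * byte offset into the first block is honoured via 'byteoffset'.
 */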
1487static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001488 int *di, struct qcedev_handle *handle,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001489 uint8_t *k_align_src)
1490{
1491 int err = 0;
1492 int i = 0;
1493 int dst_i = *di;
1494 struct scatterlist sg_src;
1495 uint32_t byteoffset = 0;
1496 uint8_t *user_src = NULL;
1497 uint8_t *k_align_dst = k_align_src;
1498 struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;
1499
1500
1501 if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
1502 byteoffset = areq->cipher_op_req.byteoffset;
1503
1504 user_src = (void __user *)areq->cipher_op_req.vbuf.src[0].vaddr;
1505 if (user_src && __copy_from_user((k_align_src + byteoffset),
1506 (void __user *)user_src,
1507 areq->cipher_op_req.vbuf.src[0].len))
1508 return -EFAULT;
1509
1510 k_align_src += areq->cipher_op_req.vbuf.src[0].len;
1511
1512 for (i = 1; i < areq->cipher_op_req.entries; i++) {
1513 user_src =
1514 (void __user *)areq->cipher_op_req.vbuf.src[i].vaddr;
1515 if (user_src && __copy_from_user(k_align_src,
1516 (void __user *)user_src,
1517 areq->cipher_op_req.vbuf.src[i].len)) {
1518 return -EFAULT;
1519 }
1520 k_align_src += areq->cipher_op_req.vbuf.src[i].len;
1521 }
1522
1523 /* restore src beginning */
1524 k_align_src = k_align_dst;
1525 areq->cipher_op_req.data_len += byteoffset;
1526
1527 areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
1528 areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;
1529
1530 /* In place encryption/decryption */
1531 sg_set_buf(areq->cipher_req.creq.src,
1532 k_align_dst,
1533 areq->cipher_op_req.data_len);
1534 sg_mark_end(areq->cipher_req.creq.src);
1535
1536 areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
1537 areq->cipher_req.creq.info = areq->cipher_op_req.iv;
1538 areq->cipher_op_req.entries = 1;
1539
Mona Hossain087c60b2011-07-20 10:34:57 -07001540 err = submit_req(areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001541
1542 /* copy data to destination buffer*/
1543 creq->data_len -= byteoffset;
1544
1545 while (creq->data_len > 0) {
1546 if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
1547 if (err == 0 && __copy_to_user(
1548 (void __user *)creq->vbuf.dst[dst_i].vaddr,
1549 (k_align_dst + byteoffset),
1550 creq->vbuf.dst[dst_i].len))
1551 return -EFAULT;
1552
1553 k_align_dst += creq->vbuf.dst[dst_i].len +
1554 byteoffset;
1555 creq->data_len -= creq->vbuf.dst[dst_i].len;
1556 dst_i++;
1557 } else {
1558 if (err == 0 && __copy_to_user(
1559 (void __user *)creq->vbuf.dst[dst_i].vaddr,
1560 (k_align_dst + byteoffset),
1561 creq->data_len))
1562 return -EFAULT;
1563
1564 k_align_dst += creq->data_len;
1565 creq->vbuf.dst[dst_i].len -= creq->data_len;
1566 creq->vbuf.dst[dst_i].vaddr += creq->data_len;
1567 creq->data_len = 0;
1568 }
1569 }
1570 *di = dst_i;
1571
1572 return err;
1573};
1574
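/*
 * Cipher a request held in virtual (user) buffers.  The user source and
 * destination addresses are validated first; requests larger than
 * QCE_MAX_OPER_DATA are then broken into chunks that are handed to
 * qcedev_vbuf_ablk_cipher_max_xfer() one at a time.  The caller's
 * request structure is restored from saved_req before returning.
 */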
1575static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001576 struct qcedev_handle *handle)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001577{
1578 int err = 0;
1579 int di = 0;
1580 int i = 0;
1581 int j = 0;
1582 int k = 0;
1583 uint32_t byteoffset = 0;
1584 int num_entries = 0;
1585 uint32_t total = 0;
1586 uint32_t len;
1587 uint8_t *k_buf_src = NULL;
1588 uint8_t *k_align_src = NULL;
1589 uint32_t max_data_xfer;
1590 struct qcedev_cipher_op_req *saved_req;
1591 struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;
1592
1593 /* Verify Source Addresses */
1594 for (i = 0; i < areq->cipher_op_req.entries; i++)
1595 if (!access_ok(VERIFY_READ,
1596 (void __user *)areq->cipher_op_req.vbuf.src[i].vaddr,
1597 areq->cipher_op_req.vbuf.src[i].len))
1598 return -EFAULT;
1599
1600 /* Verify Destination Addresses */
1601 if (areq->cipher_op_req.in_place_op != 1)
1602 for (i = 0; i < areq->cipher_op_req.entries; i++)
1603 if (!access_ok(VERIFY_WRITE,
1604 (void __user *)areq->cipher_op_req.vbuf.dst[i].vaddr,
1605 areq->cipher_op_req.vbuf.dst[i].len))
1606 return -EFAULT;
1607
1608 if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
1609 byteoffset = areq->cipher_op_req.byteoffset;
1610 k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
1611 GFP_KERNEL);
1612 if (k_buf_src == NULL) {
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07001613 pr_err("%s: Can't Allocate memory: k_buf_src 0x%x\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001614 __func__, (uint32_t)k_buf_src);
1615 return -ENOMEM;
1616 }
1617 k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
1618 CACHE_LINE_SIZE);
1619 max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;
1620
1621 saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
1622 if (saved_req == NULL) {
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07001623 pr_err("%s: Can't Allocate memory: saved_req 0x%x\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001624 __func__, (uint32_t)saved_req);
1625 kfree(k_buf_src);
1626 return -ENOMEM;
1627
1628 }
1629 memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));
1630
1631 if (areq->cipher_op_req.data_len > max_data_xfer) {
1632 struct qcedev_cipher_op_req req;
1633
1634 /* save the original req structure */
1635 memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));
1636
1637 i = 0;
1638 /* Address 32 KB at a time */
1639 while ((i < req.entries) && (err == 0)) {
1640 if (creq->vbuf.src[i].len > max_data_xfer) {
1641 creq->vbuf.src[0].len = max_data_xfer;
1642 if (i > 0) {
1643 creq->vbuf.src[0].vaddr =
1644 creq->vbuf.src[i].vaddr;
1645 }
1646
1647 creq->data_len = max_data_xfer;
1648 creq->entries = 1;
1649
1650 err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001651 &di, handle, k_align_src);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001652 if (err < 0) {
1653 kfree(k_buf_src);
1654 kfree(saved_req);
1655 return err;
1656 }
1657
1658 creq->vbuf.src[i].len = req.vbuf.src[i].len -
1659 max_data_xfer;
1660 creq->vbuf.src[i].vaddr =
1661 req.vbuf.src[i].vaddr +
1662 max_data_xfer;
1663 req.vbuf.src[i].vaddr =
1664 creq->vbuf.src[i].vaddr;
1665 req.vbuf.src[i].len = creq->vbuf.src[i].len;
1666
1667 } else {
1668 total = areq->cipher_op_req.byteoffset;
1669 for (j = i; j < req.entries; j++) {
1670 num_entries++;
1671 if ((total + creq->vbuf.src[j].len)
1672 >= max_data_xfer) {
1673 creq->vbuf.src[j].len =
1674 max_data_xfer - total;
1675 total = max_data_xfer;
1676 break;
1677 }
1678 total += creq->vbuf.src[j].len;
1679 }
1680
1681 creq->data_len = total;
1682 if (i > 0)
1683 for (k = 0; k < num_entries; k++) {
1684 creq->vbuf.src[k].len =
1685 creq->vbuf.src[i+k].len;
1686 creq->vbuf.src[k].vaddr =
1687 creq->vbuf.src[i+k].vaddr;
1688 }
1689 creq->entries = num_entries;
1690
1691 i = j;
1692 err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001693 &di, handle, k_align_src);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001694 if (err < 0) {
1695 kfree(k_buf_src);
1696 kfree(saved_req);
1697 return err;
1698 }
1699
1700 num_entries = 0;
1701 areq->cipher_op_req.byteoffset = 0;
1702
1703 creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
1704 + creq->vbuf.src[i].len;
1705 creq->vbuf.src[i].len = req.vbuf.src[i].len -
1706 creq->vbuf.src[i].len;
1707
1708 req.vbuf.src[i].vaddr =
1709 creq->vbuf.src[i].vaddr;
1710 req.vbuf.src[i].len = creq->vbuf.src[i].len;
1711
1712 if (creq->vbuf.src[i].len == 0)
1713 i++;
1714 }
1715
1716 areq->cipher_op_req.byteoffset = 0;
1717 max_data_xfer = QCE_MAX_OPER_DATA;
1718 byteoffset = 0;
1719
1720 } /* end of while ((i < req.entries) && (err == 0)) */
1721 } else
Mona Hossain087c60b2011-07-20 10:34:57 -07001722 err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001723 k_align_src);
1724
1725 /* Restore the original req structure */
1726 for (i = 0; i < saved_req->entries; i++) {
1727 creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
1728 creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
1729 }
1730 for (len = 0, i = 0; len < saved_req->data_len; i++) {
1731 creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
1732 creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
1733 len += saved_req->vbuf.dst[i].len;
1734 }
1735 creq->entries = saved_req->entries;
1736 creq->data_len = saved_req->data_len;
1737 creq->byteoffset = saved_req->byteoffset;
1738
1739 kfree(saved_req);
1740 kfree(k_buf_src);
1741 return err;
1742
1743}
1744
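/*
 * Validate a userspace cipher request against the device capabilities:
 * algorithm/mode ranges, AES key length (including optional 192-bit and
 * hardware-key support), XTS availability, CTR byteoffset restrictions,
 * the PMEM in-place requirement and IV length vs. ECB mode.  Returns 0
 * on success or -EINVAL on any violation.
 */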
1745static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
1746 struct qcedev_control *podev)
1747{
1748 if ((req->entries == 0) || (req->data_len == 0))
1749 goto error;
1750 if ((req->alg >= QCEDEV_ALG_LAST) ||
1751 (req->mode >= QCEDEV_AES_DES_MODE_LAST))
1752 goto error;
1753 if (req->alg == QCEDEV_ALG_AES) {
1754 if ((req->mode == QCEDEV_AES_MODE_XTS) &&
1755 (!podev->ce_support.aes_xts))
1756 goto error;
1757 /* if intending to use HW key make sure key fields are set
1758 * correctly and HW key is indeed supported in target
1759 */
1760 if (req->encklen == 0) {
1761 int i;
1762 for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++)
1763 if (req->enckey[i])
1764 goto error;
1765 if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
1766 (req->op != QCEDEV_OPER_DEC_NO_KEY))
1767 if (!podev->platform_support.hw_key_support)
1768 goto error;
1769 } else {
1770 if (req->encklen == QCEDEV_AES_KEY_192) {
1771 if (!podev->ce_support.aes_key_192)
1772 goto error;
1773 } else {
1774 /* if not using HW key make sure key
1775 * length is valid
1776 */
1777 if (!((req->encklen == QCEDEV_AES_KEY_128) ||
1778 (req->encklen == QCEDEV_AES_KEY_256)))
1779 goto error;
1780 }
1781 }
1782 }
1783 /* if using a byteoffset, make sure it is CTR mode using vbuf */
1784 if (req->byteoffset) {
1785 if (req->mode != QCEDEV_AES_MODE_CTR)
1786 goto error;
1787 else { /* if using CTR mode make sure not using Pmem */
1788 if (req->use_pmem)
1789 goto error;
1790 }
1791 }
1792 /* if using PMEM, the request must be an in-place operation */
1793 if (req->use_pmem) {
1794 if (!req->in_place_op)
1795 goto error;
1796 }
1797 /* Ensure zero ivlen for ECB mode, and non-zero ivlen otherwise */
1798 if (req->ivlen != 0) {
1799 if ((req->mode == QCEDEV_AES_MODE_ECB) ||
1800 (req->mode == QCEDEV_DES_MODE_ECB))
1801 goto error;
1802 } else {
1803 if ((req->mode != QCEDEV_AES_MODE_ECB) &&
1804 (req->mode != QCEDEV_DES_MODE_ECB))
1805 goto error;
1806 }
1807
1808 return 0;
1809error:
1810 return -EINVAL;
1811
1812}
1813
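/*
 * Validate a userspace hash/CMAC request: the algorithm must be known,
 * AES-CMAC must be supported by the CE hardware, and the request must
 * carry at least one entry and a non-zero data length.
 */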
1814static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
1815 struct qcedev_control *podev)
1816{
1817 if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
1818 (!podev->ce_support.cmac))
1819 goto sha_error;
1820
1821 if ((req->entries == 0) || (req->data_len == 0))
1822 goto sha_error;
1823
1824 if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST)
1825 goto sha_error;
1826
1827 return 0;
1828sha_error:
1829 return -EINVAL;
1830}
1831
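/*
 * Main ioctl entry point: copies the operation structure from userspace,
 * validates it, dispatches to the cipher or hash helpers above, and
 * copies the (possibly updated) structure back.  Illustrative userspace
 * usage (the misc device node name is assumed here; it is defined
 * elsewhere in this driver):
 *
 *	fd = open("/dev/qce", O_RDWR);
 *	ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &cipher_op_req);
 */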
1832static long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
1833{
1834 int err = 0;
Mona Hossain087c60b2011-07-20 10:34:57 -07001835 struct qcedev_handle *handle;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001836 struct qcedev_control *podev;
1837 struct qcedev_async_req qcedev_areq;
1838 struct qcedev_stat *pstat;
1839
Mona Hossain087c60b2011-07-20 10:34:57 -07001840 handle = file->private_data;
1841 podev = handle->cntl;
1842 qcedev_areq.handle = handle;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001843 if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07001844 pr_err("%s: invalid handle %p\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001845 __func__, podev);
1846 return -ENOENT;
1847 }
1848
1849 /* Verify user arguments. */
1850 if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC)
1851 return -ENOTTY;
1852
1853 init_completion(&qcedev_areq.complete);
1854 pstat = &_qcedev_stat[podev->pdev->id];
1855
1856 switch (cmd) {
1857 case QCEDEV_IOCTL_LOCK_CE:
Mona Hossain650c22c2011-07-19 09:54:19 -07001858 if (podev->platform_support.ce_shared)
1859 err = qcedev_lock_ce(podev);
1860 else
1861 err = -ENOTTY;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001862 break;
1863 case QCEDEV_IOCTL_UNLOCK_CE:
Mona Hossain650c22c2011-07-19 09:54:19 -07001864 if (podev->platform_support.ce_shared)
1865 err = qcedev_unlock_ce(podev);
1866 else
1867 err = -ENOTTY;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001868 break;
1869 case QCEDEV_IOCTL_ENC_REQ:
1870 case QCEDEV_IOCTL_DEC_REQ:
1871 if (!access_ok(VERIFY_WRITE, (void __user *)arg,
1872 sizeof(struct qcedev_cipher_op_req)))
1873 return -EFAULT;
1874
1875 if (__copy_from_user(&qcedev_areq.cipher_op_req,
1876 (void __user *)arg,
1877 sizeof(struct qcedev_cipher_op_req)))
1878 return -EFAULT;
1879 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_CIPHER;
1880
1881 if (qcedev_check_cipher_params(&qcedev_areq.cipher_op_req,
1882 podev))
1883 return -EINVAL;
1884
Ramesh Masavarapua63ff1e2011-10-20 10:51:25 -07001885 if (qcedev_areq.cipher_op_req.use_pmem)
Mona Hossain087c60b2011-07-20 10:34:57 -07001886 err = qcedev_pmem_ablk_cipher(&qcedev_areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001887 else
Mona Hossain087c60b2011-07-20 10:34:57 -07001888 err = qcedev_vbuf_ablk_cipher(&qcedev_areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001889 if (err)
1890 return err;
1891 if (__copy_to_user((void __user *)arg,
1892 &qcedev_areq.cipher_op_req,
1893 sizeof(struct qcedev_cipher_op_req)))
1894 return -EFAULT;
1895 break;
1896
1897 case QCEDEV_IOCTL_SHA_INIT_REQ:
1898
1899 if (!access_ok(VERIFY_WRITE, (void __user *)arg,
1900 sizeof(struct qcedev_sha_op_req)))
1901 return -EFAULT;
1902
1903 if (__copy_from_user(&qcedev_areq.sha_op_req,
1904 (void __user *)arg,
1905 sizeof(struct qcedev_sha_op_req)))
1906 return -EFAULT;
1907 if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
1908 return -EINVAL;
1909 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
Mona Hossain087c60b2011-07-20 10:34:57 -07001910 err = qcedev_hash_init(&qcedev_areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001911 if (err)
1912 return err;
1913 if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
1914 sizeof(struct qcedev_sha_op_req)))
1915 return -EFAULT;
1916 break;
1917 case QCEDEV_IOCTL_GET_CMAC_REQ:
1918 if (!podev->ce_support.cmac)
1919 return -ENOTTY;
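		/* fall through: CMAC requests share the SHA update path below */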
1920 case QCEDEV_IOCTL_SHA_UPDATE_REQ:
1921 if (!access_ok(VERIFY_WRITE, (void __user *)arg,
1922 sizeof(struct qcedev_sha_op_req)))
1923 return -EFAULT;
1924
1925 if (__copy_from_user(&qcedev_areq.sha_op_req,
1926 (void __user *)arg,
1927 sizeof(struct qcedev_sha_op_req)))
1928 return -EFAULT;
1929 if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
1930 return -EINVAL;
1931 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
1932
1933 if (qcedev_areq.sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
Mona Hossain087c60b2011-07-20 10:34:57 -07001934 err = qcedev_hash_cmac(&qcedev_areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001935 if (err)
1936 return err;
1937 } else {
Mona Hossain087c60b2011-07-20 10:34:57 -07001938 err = qcedev_hash_update(&qcedev_areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001939 if (err)
1940 return err;
1941 }
1942
1943 memcpy(&qcedev_areq.sha_op_req.digest[0],
Mona Hossain087c60b2011-07-20 10:34:57 -07001944 &handle->sha_ctxt.digest[0],
1945 handle->sha_ctxt.diglen);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001946 if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
1947 sizeof(struct qcedev_sha_op_req)))
1948 return -EFAULT;
1949 break;
1950
1951 case QCEDEV_IOCTL_SHA_FINAL_REQ:
1952
1953 if (!access_ok(VERIFY_WRITE, (void __user *)arg,
1954 sizeof(struct qcedev_sha_op_req)))
1955 return -EFAULT;
1956
1957 if (__copy_from_user(&qcedev_areq.sha_op_req,
1958 (void __user *)arg,
1959 sizeof(struct qcedev_sha_op_req)))
1960 return -EFAULT;
1961 if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
1962 return -EINVAL;
1963 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
Mona Hossain087c60b2011-07-20 10:34:57 -07001964 err = qcedev_hash_final(&qcedev_areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001965 if (err)
1966 return err;
Mona Hossain087c60b2011-07-20 10:34:57 -07001967 qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001968 memcpy(&qcedev_areq.sha_op_req.digest[0],
Mona Hossain087c60b2011-07-20 10:34:57 -07001969 &handle->sha_ctxt.digest[0],
1970 handle->sha_ctxt.diglen);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001971 if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
1972 sizeof(struct qcedev_sha_op_req)))
1973 return -EFAULT;
1974 break;
1975
1976 case QCEDEV_IOCTL_GET_SHA_REQ:
1977
1978 if (!access_ok(VERIFY_WRITE, (void __user *)arg,
1979 sizeof(struct qcedev_sha_op_req)))
1980 return -EFAULT;
1981
1982 if (__copy_from_user(&qcedev_areq.sha_op_req,
1983 (void __user *)arg,
1984 sizeof(struct qcedev_sha_op_req)))
1985 return -EFAULT;
1986 if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
1987 return -EINVAL;
1988 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
Mona Hossain087c60b2011-07-20 10:34:57 -07001989 qcedev_hash_init(&qcedev_areq, handle);
1990 err = qcedev_hash_update(&qcedev_areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001991 if (err)
1992 return err;
Mona Hossain087c60b2011-07-20 10:34:57 -07001993 err = qcedev_hash_final(&qcedev_areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001994 if (err)
1995 return err;
Mona Hossain087c60b2011-07-20 10:34:57 -07001996 qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001997 memcpy(&qcedev_areq.sha_op_req.digest[0],
Mona Hossain087c60b2011-07-20 10:34:57 -07001998 &handle->sha_ctxt.digest[0],
1999 handle->sha_ctxt.diglen);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002000 if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
2001 sizeof(struct qcedev_sha_op_req)))
2002 return -EFAULT;
2003 break;
2004
2005 default:
2006 return -ENOTTY;
2007 }
2008
2009 return err;
2010}
2011
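/*
 * Platform probe: pick up the CE platform data (shared-CE, HW key and
 * bus-scaling information), open the QCE core via qce_open(), register
 * an optional bus-scaling client and finally the misc device.  Any
 * failure unwinds the bus-scaling client and closes the QCE handle.
 */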
2012static int qcedev_probe(struct platform_device *pdev)
2013{
2014 void *handle = NULL;
2015 int rc = 0;
2016 struct qcedev_control *podev;
2017 struct msm_ce_hw_support *platform_support;
2018
2019 if (pdev->id >= MAX_QCE_DEVICE) {
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07002020 pr_err("%s: device id %d exceeds allowed %d\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002021 __func__, pdev->id, MAX_QCE_DEVICE);
2022 return -ENOENT;
2023 }
2024 podev = &qce_dev[pdev->id];
2025
2026 platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data;
2027 podev->platform_support.ce_shared = platform_support->ce_shared;
2028 podev->platform_support.shared_ce_resource =
2029 platform_support->shared_ce_resource;
2030 podev->platform_support.hw_key_support =
2031 platform_support->hw_key_support;
Ramesh Masavarapu49259682011-12-02 14:00:18 -08002032 podev->platform_support.bus_scale_table =
2033 platform_support->bus_scale_table;
Mona Hossain650c22c2011-07-19 09:54:19 -07002034 podev->ce_lock_count = 0;
Ramesh Masavarapu49259682011-12-02 14:00:18 -08002035 podev->high_bw_req_count = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002036 INIT_LIST_HEAD(&podev->ready_commands);
2037 podev->active_command = NULL;
2038
2039 spin_lock_init(&podev->lock);
2040
2041 tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);
2042
2043 /* open qce */
2044 handle = qce_open(pdev, &rc);
2045 if (handle == NULL) {
2046 platform_set_drvdata(pdev, NULL);
2047 return rc;
2048 }
2049
2050 podev->qce = handle;
2051 podev->pdev = pdev;
2052 platform_set_drvdata(pdev, podev);
2053 qce_hw_support(podev->qce, &podev->ce_support);
Ramesh Masavarapu49259682011-12-02 14:00:18 -08002054
2055 if (podev->platform_support.bus_scale_table != NULL) {
2056 podev->bus_scale_handle =
2057 msm_bus_scale_register_client(
2058 (struct msm_bus_scale_pdata *)
2059 podev->platform_support.bus_scale_table);
2060 if (!podev->bus_scale_handle) {
2061 printk(KERN_ERR "%s not able to get bus scale\n",
2062 __func__);
2063 rc = -ENOMEM;
2064 goto err;
2065 }
2066 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002067 rc = misc_register(&podev->miscdevice);
2068
2069 if (rc >= 0)
2070 return 0;
Ramesh Masavarapu49259682011-12-02 14:00:18 -08002071 else
2072 if (podev->platform_support.bus_scale_table != NULL)
2073 msm_bus_scale_unregister_client(
2074 podev->bus_scale_handle);
2075err:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002076
2077 if (handle)
2078 qce_close(handle);
2079 platform_set_drvdata(pdev, NULL);
2080 podev->qce = NULL;
2081 podev->pdev = NULL;
2082 return rc;
2083};
2084
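/*
 * Platform remove: close the QCE handle, release the bus-scaling client,
 * deregister the misc device and stop the completion tasklet.
 */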
2085static int qcedev_remove(struct platform_device *pdev)
2086{
2087 struct qcedev_control *podev;
2088
2089 podev = platform_get_drvdata(pdev);
2090 if (!podev)
2091 return 0;
2092 if (podev->qce)
2093 qce_close(podev->qce);
2094
Ramesh Masavarapu49259682011-12-02 14:00:18 -08002095 if (podev->platform_support.bus_scale_table != NULL)
2096 msm_bus_scale_unregister_client(podev->bus_scale_handle);
2097
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002098 if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
2099 misc_deregister(&podev->miscdevice);
2100 tasklet_kill(&podev->done_tasklet);
2101 return 0;
2102};
2103
Mona Hossain92c2ef92012-07-05 09:38:17 -07002104static struct of_device_id qcedev_match[] = {
2105 { .compatible = "qcom,qcedev",
2106 },
2107 {}
2108};
2109
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002110static struct platform_driver qcedev_plat_driver = {
2111 .probe = qcedev_probe,
2112 .remove = qcedev_remove,
2113 .driver = {
2114 .name = "qce",
2115 .owner = THIS_MODULE,
Mona Hossain92c2ef92012-07-05 09:38:17 -07002116 .of_match_table = qcedev_match,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002117 },
2118};
2119
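/*
 * Format the per-device encryption/decryption success and failure
 * counters into _debug_read_buf for the debugfs "stats-N" files.
 */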
2120static int _disp_stats(int id)
2121{
2122 struct qcedev_stat *pstat;
2123 int len = 0;
2124
2125 pstat = &_qcedev_stat[id];
2126 len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
2127 "\nQualcomm QCE dev driver %d Statistics:\n",
2128 id + 1);
2129
2130 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
2131 " Encryption operation success : %d\n",
2132 pstat->qcedev_enc_success);
2133 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
2134 " Encryption operation fail : %d\n",
2135 pstat->qcedev_enc_fail);
2136 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
2137 " Decryption operation success : %d\n",
2138 pstat->qcedev_dec_success);
2139
2140 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
2141 " Encryption operation fail : %d\n",
2142 pstat->qcedev_dec_fail);
2143
2144 return len;
2145}
2146
2147static int _debug_stats_open(struct inode *inode, struct file *file)
2148{
2149 file->private_data = inode->i_private;
2150 return 0;
2151}
2152
2153static ssize_t _debug_stats_read(struct file *file, char __user *buf,
2154 size_t count, loff_t *ppos)
2155{
2156 int rc = -EINVAL;
2157 int qcedev = *((int *) file->private_data);
2158 int len;
2159
2160 len = _disp_stats(qcedev);
2161
2162 rc = simple_read_from_buffer((void __user *) buf, count,
2163 ppos, (void *) _debug_read_buf, len);
2164
2165 return rc;
2166}
2167
2168static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
2169 size_t count, loff_t *ppos)
2170{
2171
2172 int qcedev = *((int *) file->private_data);
2173
2174 memset((char *)&_qcedev_stat[qcedev], 0, sizeof(struct qcedev_stat));
2175 return count;
2176};
2177
2178static const struct file_operations _debug_stats_ops = {
2179 .open = _debug_stats_open,
2180 .read = _debug_stats_read,
2181 .write = _debug_stats_write,
2182};
2183
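/*
 * Create the "qcedev" debugfs directory with one "stats-N" file per
 * device.  Reading a file dumps the counters via _disp_stats(); writing
 * anything to it clears them.
 */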
2184static int _qcedev_debug_init(void)
2185{
2186 int rc;
2187 char name[DEBUG_MAX_FNAME];
2188 int i;
2189 struct dentry *dent;
2190
2191 _debug_dent = debugfs_create_dir("qcedev", NULL);
2192 if (IS_ERR(_debug_dent)) {
2193 pr_err("qcedev debugfs_create_dir fail, error %ld\n",
2194 PTR_ERR(_debug_dent));
2195 return PTR_ERR(_debug_dent);
2196 }
2197
2198 for (i = 0; i < MAX_QCE_DEVICE; i++) {
2199 snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", i+1);
2200 _debug_qcedev[i] = i;
2201 dent = debugfs_create_file(name, 0644, _debug_dent,
2202 &_debug_qcedev[i], &_debug_stats_ops);
2203 if (dent == NULL) {
2204 pr_err("qcedev debugfs_create_file fail, error %ld\n",
2205 PTR_ERR(dent));
2206 rc = PTR_ERR(dent);
2207 goto err;
2208 }
2209 }
2210 return 0;
2211err:
2212 debugfs_remove_recursive(_debug_dent);
2213 return rc;
2214}
2215
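/*
 * Module init/exit: set up the debugfs entries, then register the
 * platform driver; qcedev_exit() tears both down again.
 */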
2216static int qcedev_init(void)
2217{
2218 int rc;
2219
2220 rc = _qcedev_debug_init();
2221 if (rc)
2222 return rc;
2223 return platform_driver_register(&qcedev_plat_driver);
2224}
2225
2226static void qcedev_exit(void)
2227{
2228 debugfs_remove_recursive(_debug_dent);
2229 platform_driver_unregister(&qcedev_plat_driver);
2230}
2231
2232MODULE_LICENSE("GPL v2");
2233MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
2234MODULE_DESCRIPTION("Qualcomm DEV Crypto driver");
Mona Hossain92c2ef92012-07-05 09:38:17 -07002235MODULE_VERSION("1.27");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002236
2237module_init(qcedev_init);
2238module_exit(qcedev_exit);