/* Qualcomm CE device driver.
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/mman.h>
#include <linux/android_pmem.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/hash.h>
#include <linux/platform_data/qcom_crypto_device.h>
#include <mach/scm.h>
#include <mach/msm_bus.h>
#include <linux/qcedev.h>
#include "qce.h"


#define CACHE_LINE_SIZE 32
#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE

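/* standard initialization vector for SHA-1, source: FIPS 180-2 */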
static uint8_t _std_init_vector_sha1_uint8[] = {
	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
	0xC3, 0xD2, 0xE1, 0xF0
};
/* standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha256_uint8[] = {
	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
};

enum qcedev_crypto_oper_type {
	QCEDEV_CRYPTO_OPER_CIPHER = 0,
	QCEDEV_CRYPTO_OPER_SHA = 1,
	QCEDEV_CRYPTO_OPER_LAST
};

struct qcedev_handle;

struct qcedev_cipher_req {
	struct ablkcipher_request creq;
	void *cookie;
};

struct qcedev_sha_req {
	struct ahash_request sreq;
	void *cookie;
};

struct qcedev_sha_ctxt {
	uint32_t auth_data[4];
	uint8_t digest[QCEDEV_MAX_SHA_DIGEST];
	uint32_t diglen;
	uint8_t trailing_buf[64];
	uint32_t trailing_buf_len;
	uint8_t first_blk;
	uint8_t last_blk;
	uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
};

struct qcedev_async_req {
	struct list_head list;
	struct completion complete;
	enum qcedev_crypto_oper_type op_type;
	union {
		struct qcedev_cipher_op_req cipher_op_req;
		struct qcedev_sha_op_req sha_op_req;
	};
	union {
		struct qcedev_cipher_req cipher_req;
		struct qcedev_sha_req sha_req;
	};
	struct qcedev_handle *handle;
	int err;
};

static DEFINE_MUTEX(send_cmd_lock);
static DEFINE_MUTEX(sent_bw_req);
/**********************************************************************
 * Register ourselves as a misc device to be able to access the dev driver
 * from userspace. */


#define QCEDEV_DEV "qcedev"

struct qcedev_control {

	/* CE features supported by platform */
	struct msm_ce_hw_support platform_support;

	uint32_t ce_lock_count;
	uint32_t high_bw_req_count;

	/* CE features/algorithms supported by HW engine */
	struct ce_hw_support ce_support;

	uint32_t bus_scale_handle;

	/* misc device */
	struct miscdevice miscdevice;

	/* qce handle */
	void *qce;

	/* platform device */
	struct platform_device *pdev;

	unsigned magic;

	struct list_head ready_commands;
	struct qcedev_async_req *active_command;
	spinlock_t lock;
	struct tasklet_struct done_tasklet;
};

struct qcedev_handle {
	/* qcedev control handle */
	struct qcedev_control *cntl;
	/* qce internal sha context */
	struct qcedev_sha_ctxt sha_ctxt;
};

/*-------------------------------------------------------------------------
 * Resource Locking Service
 * ------------------------------------------------------------------------*/
#define QCEDEV_CMD_ID		1
#define QCEDEV_CE_LOCK_CMD	1
#define QCEDEV_CE_UNLOCK_CMD	0
#define NUM_RETRY		1000
#define CE_BUSY			55

static int qcedev_scm_cmd(int resource, int cmd, int *response)
{
#ifdef CONFIG_MSM_SCM

	struct {
		int resource;
		int cmd;
	} cmd_buf;

	cmd_buf.resource = resource;
	cmd_buf.cmd = cmd;

	return scm_call(SCM_SVC_TZ, QCEDEV_CMD_ID, &cmd_buf,
			sizeof(cmd_buf), response, sizeof(*response));

#else
	return 0;
#endif
}

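/*
 * Vote for high crypto engine bus bandwidth on behalf of a client handle.
 * The first open handle raises the bus request; the last release drops it.
 */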
static int qcedev_ce_high_bw_req(struct qcedev_control *podev,
					bool high_bw_req)
{
	int ret = 0;

	mutex_lock(&sent_bw_req);
	if (high_bw_req) {
		if (podev->high_bw_req_count == 0)
			msm_bus_scale_client_update_request(
				podev->bus_scale_handle, 1);
		podev->high_bw_req_count++;
	} else {
		if (podev->high_bw_req_count == 1)
			msm_bus_scale_client_update_request(
				podev->bus_scale_handle, 0);
		podev->high_bw_req_count--;
	}
	mutex_unlock(&sent_bw_req);

	return ret;
}

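/*
 * On targets where the crypto engine is a shared resource, the CE must be
 * locked/unlocked around each request via an SCM call. The lock count lets
 * requests issued through this driver share a single hardware lock.
 */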
static int qcedev_unlock_ce(struct qcedev_control *podev)
{
	int ret = 0;

	mutex_lock(&send_cmd_lock);
	if (podev->ce_lock_count == 1) {
		int response = 0;

		if (qcedev_scm_cmd(podev->platform_support.shared_ce_resource,
				QCEDEV_CE_UNLOCK_CMD, &response)) {
			pr_err("Failed to release CE lock\n");
			ret = -EIO;
		}
	}
	if (ret == 0) {
		if (podev->ce_lock_count)
			podev->ce_lock_count--;
		else {
			/* We should never be here */
			ret = -EIO;
			pr_err("CE hardware is already unlocked\n");
		}
	}
	mutex_unlock(&send_cmd_lock);

	return ret;
}

static int qcedev_lock_ce(struct qcedev_control *podev)
{
	int ret = 0;

	mutex_lock(&send_cmd_lock);
	if (podev->ce_lock_count == 0) {
		int response = -CE_BUSY;
		int i = 0;

		do {
			if (qcedev_scm_cmd(
				podev->platform_support.shared_ce_resource,
				QCEDEV_CE_LOCK_CMD, &response)) {
				response = -EINVAL;
				break;
			}
		} while ((response == -CE_BUSY) && (i++ < NUM_RETRY));

		if ((response == -CE_BUSY) && (i >= NUM_RETRY)) {
			ret = -EUSERS;
		} else {
			if (response < 0)
				ret = -EINVAL;
		}
	}
	if (ret == 0)
		podev->ce_lock_count++;
	mutex_unlock(&send_cmd_lock);
	return ret;
}

#define QCEDEV_MAGIC 0x56434544 /* "qced" */

static long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
static int qcedev_open(struct inode *inode, struct file *file);
static int qcedev_release(struct inode *inode, struct file *file);
static int start_cipher_req(struct qcedev_control *podev);
static int start_sha_req(struct qcedev_control *podev);

static const struct file_operations qcedev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qcedev_ioctl,
	.open = qcedev_open,
	.release = qcedev_release,
};

static struct qcedev_control qce_dev[] = {
	{
		.miscdevice = {
			.minor = MISC_DYNAMIC_MINOR,
			.name = "qce",
			.fops = &qcedev_fops,
		},
		.magic = QCEDEV_MAGIC,
	},
};

#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
#define DEBUG_MAX_FNAME  16
#define DEBUG_MAX_RW_BUF 1024

struct qcedev_stat {
	u32 qcedev_dec_success;
	u32 qcedev_dec_fail;
	u32 qcedev_enc_success;
	u32 qcedev_enc_fail;
	u32 qcedev_sha_success;
	u32 qcedev_sha_fail;
};

static struct qcedev_stat _qcedev_stat[MAX_QCE_DEVICE];
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static int _debug_qcedev[MAX_QCE_DEVICE];

static struct qcedev_control *qcedev_minor_to_control(unsigned n)
{
	int i;

	for (i = 0; i < MAX_QCE_DEVICE; i++) {
		if (qce_dev[i].miscdevice.minor == n)
			return &qce_dev[i];
	}
	return NULL;
}

static int qcedev_open(struct inode *inode, struct file *file)
{
	struct qcedev_handle *handle;
	struct qcedev_control *podev;

	podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
	if (podev == NULL) {
		pr_err("%s: no such device %d\n", __func__,
				MINOR(inode->i_rdev));
		return -ENOENT;
	}

	handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
	if (handle == NULL) {
		pr_err("Failed to allocate memory %ld\n",
				PTR_ERR(handle));
		return -ENOMEM;
	}

	handle->cntl = podev;
	file->private_data = handle;
	if (podev->platform_support.bus_scale_table != NULL)
		return qcedev_ce_high_bw_req(podev, true);
	return 0;
}

static int qcedev_release(struct inode *inode, struct file *file)
{
	struct qcedev_control *podev;
	struct qcedev_handle *handle;

	handle = file->private_data;
	podev = handle->cntl;
	if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
		pr_err("%s: invalid handle %p\n",
					__func__, podev);
	}
	kzfree(handle);
	file->private_data = NULL;
	if (podev->platform_support.bus_scale_table != NULL)
		return qcedev_ce_high_bw_req(podev, false);
	return 0;
}

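/*
 * Tasklet handler: completes the finished request and, if more requests are
 * queued on ready_commands, starts the next one. Requests whose start fails
 * are completed immediately and the queue is drained further.
 */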
static void req_done(unsigned long data)
{
	struct qcedev_control *podev = (struct qcedev_control *)data;
	struct qcedev_async_req *areq;
	unsigned long flags = 0;
	struct qcedev_async_req *new_req = NULL;
	int ret = 0;

	spin_lock_irqsave(&podev->lock, flags);
	areq = podev->active_command;
	podev->active_command = NULL;

again:
	if (!list_empty(&podev->ready_commands)) {
		new_req = container_of(podev->ready_commands.next,
						struct qcedev_async_req, list);
		list_del(&new_req->list);
		podev->active_command = new_req;
		new_req->err = 0;
		if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev);
		else
			ret = start_sha_req(podev);
	}

	spin_unlock_irqrestore(&podev->lock, flags);

	if (areq)
		complete(&areq->complete);

	if (new_req && ret) {
		complete(&new_req->complete);
		spin_lock_irqsave(&podev->lock, flags);
		podev->active_command = NULL;
		areq = NULL;
		ret = 0;
		new_req = NULL;
		goto again;
	}

	return;
}

static void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
	unsigned char *authdata, int ret)
{
	struct qcedev_sha_req *areq;
	struct qcedev_control *pdev;
	struct qcedev_handle *handle;

	uint32_t *auth32 = (uint32_t *)authdata;

	areq = (struct qcedev_sha_req *) cookie;
	handle = (struct qcedev_handle *) areq->cookie;
	pdev = handle->cntl;

	if (digest)
		memcpy(&handle->sha_ctxt.digest[0], digest, 32);

	if (authdata) {
		handle->sha_ctxt.auth_data[0] = auth32[0];
		handle->sha_ctxt.auth_data[1] = auth32[1];
		handle->sha_ctxt.auth_data[2] = auth32[2];
		handle->sha_ctxt.auth_data[3] = auth32[3];
	}

	tasklet_schedule(&pdev->done_tasklet);
};


static void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
	unsigned char *iv, int ret)
{
	struct qcedev_cipher_req *areq;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;

	areq = (struct qcedev_cipher_req *) cookie;
	handle = (struct qcedev_handle *) areq->cookie;
	podev = handle->cntl;
	qcedev_areq = podev->active_command;

	if (iv)
		memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
			qcedev_areq->cipher_op_req.ivlen);
	tasklet_schedule(&podev->done_tasklet);
};

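/*
 * Translate the userspace cipher request (active_command) into a qce_req
 * and hand it to the QCE driver. On failure the async request's err field
 * is set to -ENXIO.
 */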
static int start_cipher_req(struct qcedev_control *podev)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_req creq;
	int ret = 0;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;

	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
	creq.use_pmem = qcedev_areq->cipher_op_req.use_pmem;
	if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM)
		creq.pmem = &qcedev_areq->cipher_op_req.pmem;
	else
		creq.pmem = NULL;

	switch (qcedev_areq->cipher_op_req.alg) {
	case QCEDEV_ALG_DES:
		creq.alg = CIPHER_ALG_DES;
		break;
	case QCEDEV_ALG_3DES:
		creq.alg = CIPHER_ALG_3DES;
		break;
	case QCEDEV_ALG_AES:
		creq.alg = CIPHER_ALG_AES;
		break;
	default:
		return -EINVAL;
	};

	switch (qcedev_areq->cipher_op_req.mode) {
	case QCEDEV_AES_MODE_CBC:
	case QCEDEV_DES_MODE_CBC:
		creq.mode = QCE_MODE_CBC;
		break;
	case QCEDEV_AES_MODE_ECB:
	case QCEDEV_DES_MODE_ECB:
		creq.mode = QCE_MODE_ECB;
		break;
	case QCEDEV_AES_MODE_CTR:
		creq.mode = QCE_MODE_CTR;
		break;
	case QCEDEV_AES_MODE_XTS:
		creq.mode = QCE_MODE_XTS;
		break;
	default:
		return -EINVAL;
	};

	if ((creq.alg == CIPHER_ALG_AES) &&
		(creq.mode == QCE_MODE_CTR)) {
		creq.dir = QCE_ENCRYPT;
	} else {
		if (QCEDEV_OPER_ENC == qcedev_areq->cipher_op_req.op)
			creq.dir = QCE_ENCRYPT;
		else
			creq.dir = QCE_DECRYPT;
	}

	creq.iv = &qcedev_areq->cipher_op_req.iv[0];
	creq.ivsize = qcedev_areq->cipher_op_req.ivlen;

	creq.enckey = &qcedev_areq->cipher_op_req.enckey[0];
	creq.encklen = qcedev_areq->cipher_op_req.encklen;

	creq.cryptlen = qcedev_areq->cipher_op_req.data_len;

	if (qcedev_areq->cipher_op_req.encklen == 0) {
		if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
			|| (qcedev_areq->cipher_op_req.op ==
				QCEDEV_OPER_DEC_NO_KEY))
			creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
		else {
			int i;

			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
				if (qcedev_areq->cipher_op_req.enckey[i] != 0)
					break;
			}

			if ((podev->platform_support.hw_key_support == 1) &&
						(i == QCEDEV_MAX_KEY_SIZE))
				creq.op = QCE_REQ_ABLK_CIPHER;
			else {
				ret = -EINVAL;
				goto unsupported;
			}
		}
	} else {
		creq.op = QCE_REQ_ABLK_CIPHER;
	}

	creq.qce_cb = qcedev_cipher_req_cb;
	creq.areq = (void *)&qcedev_areq->cipher_req;

	ret = qce_ablk_cipher_req(podev->qce, &creq);
unsupported:
	if (ret)
		qcedev_areq->err = -ENXIO;
	else
		qcedev_areq->err = 0;
	return ret;
};

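/*
 * Translate the userspace hash/HMAC/CMAC request (active_command) into a
 * qce_sha_req and hand it to the QCE driver.
 */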
static int start_sha_req(struct qcedev_control *podev)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_sha_req sreq;
	int ret = 0;
	struct qcedev_handle *handle;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	handle = qcedev_areq->handle;

	switch (qcedev_areq->sha_op_req.alg) {
	case QCEDEV_ALG_SHA1:
		sreq.alg = QCE_HASH_SHA1;
		break;
	case QCEDEV_ALG_SHA256:
		sreq.alg = QCE_HASH_SHA256;
		break;
	case QCEDEV_ALG_SHA1_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA1_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];

		} else {
			sreq.alg = QCE_HASH_SHA1;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_SHA256_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA256_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];

		} else {
			sreq.alg = QCE_HASH_SHA256;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_AES_CMAC:
		sreq.alg = QCE_HASH_AES_CMAC;
		sreq.authkey = &handle->sha_ctxt.authkey[0];
		sreq.authklen = qcedev_areq->sha_op_req.authklen;
		break;
	default:
		break;
	};

	qcedev_areq->sha_req.cookie = handle;

	sreq.qce_cb = qcedev_sha_req_cb;
	if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
		sreq.auth_data[0] = handle->sha_ctxt.auth_data[0];
		sreq.auth_data[1] = handle->sha_ctxt.auth_data[1];
		sreq.auth_data[2] = handle->sha_ctxt.auth_data[2];
		sreq.auth_data[3] = handle->sha_ctxt.auth_data[3];
		sreq.digest = &handle->sha_ctxt.digest[0];
		sreq.first_blk = handle->sha_ctxt.first_blk;
		sreq.last_blk = handle->sha_ctxt.last_blk;
	}
	sreq.size = qcedev_areq->sha_req.sreq.nbytes;
	sreq.src = qcedev_areq->sha_req.sreq.src;
	sreq.areq = (void *)&qcedev_areq->sha_req;

	ret = qce_process_sha_req(podev->qce, &sreq);

	if (ret)
		qcedev_areq->err = -ENXIO;
	else
		qcedev_areq->err = 0;
	return ret;
};

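/*
 * Queue an async request on the device, start it if the engine is idle,
 * and block until the completion callback fires. Also takes/releases the
 * shared-CE lock when required and updates the debugfs statistics.
 */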
static int submit_req(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle)
{
	struct qcedev_control *podev;
	unsigned long flags = 0;
	int ret = 0;
	struct qcedev_stat *pstat;

	qcedev_areq->err = 0;
	podev = handle->cntl;

	if (podev->platform_support.ce_shared) {
		ret = qcedev_lock_ce(podev);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&podev->lock, flags);

	if (podev->active_command == NULL) {
		podev->active_command = qcedev_areq;
		if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev);
		else
			ret = start_sha_req(podev);
	} else {
		list_add_tail(&qcedev_areq->list, &podev->ready_commands);
	}

	if (ret != 0)
		podev->active_command = NULL;

	spin_unlock_irqrestore(&podev->lock, flags);

	if (ret == 0)
		wait_for_completion(&qcedev_areq->complete);

	if (podev->platform_support.ce_shared)
		ret = qcedev_unlock_ce(podev);

	if (ret)
		qcedev_areq->err = -EIO;

	pstat = &_qcedev_stat[podev->pdev->id];
	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
		switch (qcedev_areq->cipher_op_req.op) {
		case QCEDEV_OPER_DEC:
			if (qcedev_areq->err)
				pstat->qcedev_dec_fail++;
			else
				pstat->qcedev_dec_success++;
			break;
		case QCEDEV_OPER_ENC:
			if (qcedev_areq->err)
				pstat->qcedev_enc_fail++;
			else
				pstat->qcedev_enc_success++;
			break;
		default:
			break;
		};
	} else {
		if (qcedev_areq->err)
			pstat->qcedev_sha_fail++;
		else
			pstat->qcedev_sha_success++;
	}

	return qcedev_areq->err;
}

static int qcedev_sha_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt;

	memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
	sha_ctxt->first_blk = 1;

	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
		memcpy(&sha_ctxt->digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		sha_ctxt->diglen = SHA1_DIGEST_SIZE;
	} else {
		if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
			memcpy(&sha_ctxt->digest[0],
				&_std_init_vector_sha256_uint8[0],
				SHA256_DIGEST_SIZE);
			sha_ctxt->diglen = SHA256_DIGEST_SIZE;
		}
	}
	return 0;
}

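/*
 * Hash update for a transfer of at most QCE_MAX_OPER_DATA bytes: data left
 * over from previous updates plus the user buffers are copied into one
 * cache-line-aligned kernel buffer; only whole SHA blocks are submitted and
 * the remainder is kept in sha_ctxt.trailing_buf for the next call.
 */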
static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	int i = 0;
	struct scatterlist sg_src[2];
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	uint32_t sha_pad_len = 0;
	uint32_t trailing_buf_len = 0;
	uint32_t t_buf = handle->sha_ctxt.trailing_buf_len;
	uint32_t sha_block_size;

	total = qcedev_areq->sha_op_req.data_len + t_buf;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	if (total <= sha_block_size) {
		uint32_t len = qcedev_areq->sha_op_req.data_len;

		i = 0;

		k_src = &handle->sha_ctxt.trailing_buf[t_buf];

		/* Copy data from user src(s) */
		while (len > 0) {
			user_src =
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
			if (user_src && __copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len))
				return -EFAULT;

			len -= qcedev_areq->sha_op_req.data[i].len;
			k_src += qcedev_areq->sha_op_req.data[i].len;
			i++;
		}
		handle->sha_ctxt.trailing_buf_len = total;

		return 0;
	}


	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL) {
		pr_err("%s: Can't Allocate memory: k_buf_src 0x%x\n",
			__func__, (uint32_t)k_buf_src);
		return -ENOMEM;
	}

	k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
						CACHE_LINE_SIZE);
	k_src = k_align_src;

	/* check for trailing buffer from previous updates and append it */
	if (t_buf > 0) {
		memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
								t_buf);
		k_src += t_buf;
	}

	/* Copy data from user src(s) */
	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
	if (user_src && __copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[0].len)) {
		kfree(k_buf_src);
		return -EFAULT;
	}
	k_src += qcedev_areq->sha_op_req.data[0].len;
	for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src = (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && __copy_from_user(k_src,
					(void __user *)user_src,
					qcedev_areq->sha_op_req.data[i].len)) {
			kfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	/* get new trailing buffer */
	sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
	trailing_buf_len = CE_SHA_BLOCK_SIZE - sha_pad_len;

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src[0];
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src,
						total - trailing_buf_len);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;

	/* update sha_ctxt trailing buf content to new trailing buf */
	if (trailing_buf_len > 0) {
		memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
		memcpy(&handle->sha_ctxt.trailing_buf[0],
			(k_src - trailing_buf_len),
			trailing_buf_len);
	}
	handle->sha_ctxt.trailing_buf_len = trailing_buf_len;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;

	kfree(k_buf_src);
	return err;
}

static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	int num_entries = 0;
	uint32_t total = 0;

	/* verify address src(s) */
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++)
		if (!access_ok(VERIFY_READ,
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr,
			qcedev_areq->sha_op_req.data[i].len))
			return -EFAULT;

	if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {

		struct qcedev_sha_op_req *saved_req;
		struct qcedev_sha_op_req req;
		struct qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;

		/* save the original req structure */
		saved_req =
			kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL);
		if (saved_req == NULL) {
			pr_err("%s:Can't Allocate mem:saved_req 0x%x\n",
					__func__, (uint32_t)saved_req);
			return -ENOMEM;
		}
		memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req));
		memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
				sreq->data[0].len = QCE_MAX_OPER_DATA;
				if (i > 0) {
					sreq->data[0].vaddr =
							sreq->data[i].vaddr;
				}

				sreq->data_len = QCE_MAX_OPER_DATA;
				sreq->entries = 1;

				err = qcedev_sha_update_max_xfer(qcedev_areq,
									handle);

				sreq->data[i].len = req.data[i].len -
							QCE_MAX_OPER_DATA;
				sreq->data[i].vaddr = req.data[i].vaddr +
							QCE_MAX_OPER_DATA;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;
			} else {
				total = 0;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + sreq->data[j].len) >=
							QCE_MAX_OPER_DATA) {
						sreq->data[j].len =
						(QCE_MAX_OPER_DATA - total);
						total = QCE_MAX_OPER_DATA;
						break;
					}
					total += sreq->data[j].len;
				}

				sreq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						sreq->data[k].len =
							sreq->data[i+k].len;
						sreq->data[k].vaddr =
							sreq->data[i+k].vaddr;
					}
				sreq->entries = num_entries;

				i = j;
				err = qcedev_sha_update_max_xfer(qcedev_areq,
									handle);
				num_entries = 0;

				sreq->data[i].vaddr = req.data[i].vaddr +
							sreq->data[i].len;
				sreq->data[i].len = req.data[i].len -
							sreq->data[i].len;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;

				if (sreq->data[i].len == 0)
					i++;
			}
		} /* end of while ((i < req.entries) && (err == 0)) */

		/* Restore the original req structure */
		for (i = 0; i < saved_req->entries; i++) {
			sreq->data[i].len = saved_req->data[i].len;
			sreq->data[i].vaddr = saved_req->data[i].vaddr;
		}
		sreq->entries = saved_req->entries;
		sreq->data_len = saved_req->data_len;
		kfree(saved_req);
	} else
		err = qcedev_sha_update_max_xfer(qcedev_areq, handle);

	return err;
}

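/*
 * Final hash pass: submits whatever is left in sha_ctxt.trailing_buf with
 * last_blk set so the engine produces the final digest, then clears the
 * trailing buffer and block flags.
 */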
static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint32_t total;

	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	handle->sha_ctxt.first_blk = 0;
	handle->sha_ctxt.last_blk = 1;

	total = handle->sha_ctxt.trailing_buf_len;

	if (total) {
		k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
					GFP_KERNEL);
		if (k_buf_src == NULL) {
			pr_err("%s: Can't Allocate memory: k_buf_src 0x%x\n",
				__func__, (uint32_t)k_buf_src);
			return -ENOMEM;
		}

		k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
							CACHE_LINE_SIZE);
		memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);
	}
	handle->sha_ctxt.last_blk = 1;
	handle->sha_ctxt.first_blk = 0;

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, total);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.first_blk = 0;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;
	handle->sha_ctxt.trailing_buf_len = 0;
	memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);

	kfree(k_buf_src);
	return err;
}

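/*
 * AES-CMAC: copy the user key into the context and the user data into a
 * single kernel buffer, then submit the whole message as one request.
 */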
static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle)
{
	int err = 0;
	int i = 0;
	struct scatterlist sg_src[2];
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;

	total = qcedev_areq->sha_op_req.data_len;

	/* verify address src(s) */
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++)
		if (!access_ok(VERIFY_READ,
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr,
			qcedev_areq->sha_op_req.data[i].len))
			return -EFAULT;

	/* Verify Source Address */
	if (!access_ok(VERIFY_READ,
			(void __user *)qcedev_areq->sha_op_req.authkey,
			qcedev_areq->sha_op_req.authklen))
		return -EFAULT;
	if (__copy_from_user(&handle->sha_ctxt.authkey[0],
			(void __user *)qcedev_areq->sha_op_req.authkey,
			qcedev_areq->sha_op_req.authklen))
		return -EFAULT;


	k_buf_src = kmalloc(total, GFP_KERNEL);
	if (k_buf_src == NULL) {
		pr_err("%s: Can't Allocate memory: k_buf_src 0x%x\n",
			__func__, (uint32_t)k_buf_src);
		return -ENOMEM;
	}

	k_src = k_buf_src;

	/* Copy data from user src(s) */
	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src =
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && __copy_from_user(k_src, (void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len)) {
			kfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src[0];
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_buf_src, total);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total;
	handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
	err = submit_req(qcedev_areq, handle);

	kfree(k_buf_src);
	return err;
}

static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
					struct qcedev_handle *handle)
{
	int err = 0;

	if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
		/* Verify Source Address */
		if (!access_ok(VERIFY_READ,
				(void __user *)areq->sha_op_req.authkey,
				areq->sha_op_req.authklen))
			return -EFAULT;
		if (__copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)areq->sha_op_req.authkey,
				areq->sha_op_req.authklen))
			return -EFAULT;
	} else {
		struct qcedev_async_req authkey_areq;

		init_completion(&authkey_areq.complete);

		authkey_areq.sha_op_req.entries = 1;
		authkey_areq.sha_op_req.data[0].vaddr =
						areq->sha_op_req.authkey;
		authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.diglen = 0;
		memset(&authkey_areq.sha_op_req.digest[0], 0,
						QCEDEV_MAX_SHA_DIGEST);
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;

		authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;

		qcedev_sha_init(&authkey_areq, handle);
		err = qcedev_sha_update(&authkey_areq, handle);
		if (!err)
			err = qcedev_sha_final(&authkey_areq, handle);
		else
			return err;
		memcpy(&handle->sha_ctxt.authkey[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
	}
	return err;
}

static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint8_t *k_src = NULL;
	uint32_t sha_block_size = 0;
	uint32_t sha_digest_size = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		sha_digest_size = SHA1_DIGEST_SIZE;
		sha_block_size = SHA1_BLOCK_SIZE;
	} else {
		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
			sha_digest_size = SHA256_DIGEST_SIZE;
			sha_block_size = SHA256_BLOCK_SIZE;
		}
	}
	k_src = kmalloc(sha_block_size, GFP_KERNEL);
	if (k_src == NULL) {
		pr_err("%s: Can't Allocate memory: k_src 0x%x\n",
			__func__, (uint32_t)k_src);
		return -ENOMEM;
	}

	/* check for trailing buffer from previous updates and append it */
	memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
			handle->sha_ctxt.trailing_buf_len);

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = sha_block_size;
	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0],
						sha_digest_size);
	handle->sha_ctxt.trailing_buf_len = sha_digest_size;

	handle->sha_ctxt.first_blk = 1;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE;
	}

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE;
	}
	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;

	kfree(k_src);
	return err;
}

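/*
 * Software HMAC key mixing (used when the engine lacks native HMAC support):
 * XOR the auth key with the HMAC ipad (0x36) or opad (0x5c) constant and
 * stage the result in the trailing buffer as the first hashed block.
 */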
static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
				struct qcedev_handle *handle, bool ikey)
{
	int i;
	uint32_t constant;
	uint32_t sha_block_size;

	if (ikey)
		constant = 0x36;
	else
		constant = 0x5c;

	if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	for (i = 0; i < sha_block_size; i++)
		handle->sha_ctxt.trailing_buf[i] =
				(handle->sha_ctxt.authkey[i] ^ constant);

	handle->sha_ctxt.trailing_buf_len = sha_block_size;
	return 0;
}

static int qcedev_hmac_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	qcedev_sha_init(areq, handle);
	err = qcedev_set_hmac_auth_key(areq, handle);
	if (err)
		return err;
	if (!podev->ce_support.sha_hmac)
		qcedev_hmac_update_iokey(areq, handle, true);
	return 0;
}

static int qcedev_hmac_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	err = qcedev_sha_final(areq, handle);
	if (podev->ce_support.sha_hmac)
		return err;

	qcedev_hmac_update_iokey(areq, handle, false);
	err = qcedev_hmac_get_ohash(areq, handle);
	if (err)
		return err;
	err = qcedev_sha_final(areq, handle);

	return err;
}

static int qcedev_hash_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_init(areq, handle);
	else
		return qcedev_hmac_init(areq, handle);
}

static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	return qcedev_sha_update(qcedev_areq, handle);
}

static int qcedev_hash_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_final(areq, handle);
	else
		return qcedev_hmac_final(areq, handle);
}

#ifdef CONFIG_ANDROID_PMEM
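/*
 * PMEM-backed cipher transfer of at most QCE_MAX_OPER_DATA bytes: builds
 * scatterlists directly over the pmem region(s), so no data copy to a
 * kernel bounce buffer is needed.
 */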
static int qcedev_pmem_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
						struct qcedev_handle *handle)
{
	int i = 0;
	int err = 0;
	struct scatterlist *sg_src = NULL;
	struct scatterlist *sg_dst = NULL;
	struct scatterlist *sg_ndex = NULL;
	struct file *file_src = NULL;
	struct file *file_dst = NULL;
	unsigned long paddr;
	unsigned long kvaddr;
	unsigned long len;

	sg_src = kmalloc((sizeof(struct scatterlist) *
				areq->cipher_op_req.entries), GFP_KERNEL);
	if (sg_src == NULL) {
		pr_err("%s: Can't Allocate memory:sg_src 0x%x\n",
			__func__, (uint32_t)sg_src);
		return -ENOMEM;

	}
	memset(sg_src, 0, (sizeof(struct scatterlist) *
				areq->cipher_op_req.entries));
	sg_ndex = sg_src;
	areq->cipher_req.creq.src = sg_src;

	/* address src */
	get_pmem_file(areq->cipher_op_req.pmem.fd_src, &paddr,
					&kvaddr, &len, &file_src);

	for (i = 0; i < areq->cipher_op_req.entries; i++) {
		sg_set_buf(sg_ndex,
		((uint8_t *)(areq->cipher_op_req.pmem.src[i].offset) + kvaddr),
		areq->cipher_op_req.pmem.src[i].len);
		sg_ndex++;
	}
	sg_mark_end(--sg_ndex);

	for (i = 0; i < areq->cipher_op_req.entries; i++)
		areq->cipher_op_req.pmem.src[i].offset += (uint32_t)paddr;

	/* address dst */
	/* If not place encryption/decryption */
	if (areq->cipher_op_req.in_place_op != 1) {
		sg_dst = kmalloc((sizeof(struct scatterlist) *
				areq->cipher_op_req.entries), GFP_KERNEL);
		if (sg_dst == NULL) {
			pr_err("%s: Can't Allocate memory: sg_dst 0x%x\n",
				__func__, (uint32_t)sg_dst);
			/* release the already-mapped source resources so
			 * this error path does not leak them */
			if (file_src)
				put_pmem_file(file_src);
			kfree(sg_src);
			return -ENOMEM;
		}
		memset(sg_dst, 0, (sizeof(struct scatterlist) *
					areq->cipher_op_req.entries));
		areq->cipher_req.creq.dst = sg_dst;
		sg_ndex = sg_dst;

		get_pmem_file(areq->cipher_op_req.pmem.fd_dst, &paddr,
					&kvaddr, &len, &file_dst);
		for (i = 0; i < areq->cipher_op_req.entries; i++)
			sg_set_buf(sg_ndex++,
			((uint8_t *)(areq->cipher_op_req.pmem.dst[i].offset)
			+ kvaddr), areq->cipher_op_req.pmem.dst[i].len);
		sg_mark_end(--sg_ndex);

		for (i = 0; i < areq->cipher_op_req.entries; i++)
			areq->cipher_op_req.pmem.dst[i].offset +=
							(uint32_t)paddr;
	} else {
		areq->cipher_req.creq.dst = sg_src;
		for (i = 0; i < areq->cipher_op_req.entries; i++) {
			areq->cipher_op_req.pmem.dst[i].offset =
				areq->cipher_op_req.pmem.src[i].offset;
			areq->cipher_op_req.pmem.dst[i].len =
				areq->cipher_op_req.pmem.src[i].len;
		}
	}

	areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
	areq->cipher_req.creq.info = areq->cipher_op_req.iv;

	err = submit_req(areq, handle);

	kfree(sg_src);
	kfree(sg_dst);

	if (file_dst)
		put_pmem_file(file_dst);
	if (file_src)
		put_pmem_file(file_src);

	return err;
};


static int qcedev_pmem_ablk_cipher(struct qcedev_async_req *qcedev_areq,
						struct qcedev_handle *handle)
{
	int err = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	int num_entries = 0;
	uint32_t total = 0;
	struct qcedev_cipher_op_req *saved_req;
	struct qcedev_cipher_op_req *creq = &qcedev_areq->cipher_op_req;

	saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
	if (saved_req == NULL) {
		pr_err("%s:Can't Allocate mem:saved_req 0x%x\n",
			__func__, (uint32_t)saved_req);
		return -ENOMEM;
	}
	memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));

	if (qcedev_areq->cipher_op_req.data_len > QCE_MAX_OPER_DATA) {

		struct qcedev_cipher_op_req req;

		/* save the original req structure */
		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (creq->pmem.src[i].len > QCE_MAX_OPER_DATA) {
				creq->pmem.src[0].len = QCE_MAX_OPER_DATA;
				if (i > 0) {
					creq->pmem.src[0].offset =
						creq->pmem.src[i].offset;
				}

				creq->data_len = QCE_MAX_OPER_DATA;
				creq->entries = 1;

				err =
				qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq,
								handle);

				creq->pmem.src[i].len = req.pmem.src[i].len -
							QCE_MAX_OPER_DATA;
				creq->pmem.src[i].offset =
						req.pmem.src[i].offset +
						QCE_MAX_OPER_DATA;
				req.pmem.src[i].offset =
						creq->pmem.src[i].offset;
				req.pmem.src[i].len = creq->pmem.src[i].len;
			} else {
				total = 0;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + creq->pmem.src[j].len)
							>= QCE_MAX_OPER_DATA) {
						creq->pmem.src[j].len =
						QCE_MAX_OPER_DATA - total;
						total = QCE_MAX_OPER_DATA;
						break;
					}
					total += creq->pmem.src[j].len;
				}

				creq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						creq->pmem.src[k].len =
						creq->pmem.src[i+k].len;
						creq->pmem.src[k].offset =
						creq->pmem.src[i+k].offset;
					}
				creq->entries = num_entries;

				i = j;
				err =
				qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq,
								handle);
				num_entries = 0;

				creq->pmem.src[i].offset =
						req.pmem.src[i].offset +
						creq->pmem.src[i].len;
				creq->pmem.src[i].len =
						req.pmem.src[i].len -
						creq->pmem.src[i].len;
				req.pmem.src[i].offset =
						creq->pmem.src[i].offset;
				req.pmem.src[i].len =
						creq->pmem.src[i].len;

				if (creq->pmem.src[i].len == 0)
					i++;
			}

		} /* end of while ((i < req.entries) && (err == 0)) */

	} else
		err = qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq, handle);

	/* Restore the original req structure */
	for (i = 0; i < saved_req->entries; i++) {
		creq->pmem.src[i].len = saved_req->pmem.src[i].len;
		creq->pmem.src[i].offset = saved_req->pmem.src[i].offset;
	}
	creq->entries = saved_req->entries;
	creq->data_len = saved_req->data_len;
	kfree(saved_req);

	return err;

}
#else
static int qcedev_pmem_ablk_cipher(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	return -EPERM;
}
#endif /*CONFIG_ANDROID_PMEM*/

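/*
 * Virtual-address cipher transfer of at most QCE_MAX_OPER_DATA bytes: user
 * buffers are copied into a cache-line-aligned kernel buffer, the operation
 * runs in place on that buffer, and the result is copied back out to the
 * user destination buffers.
 */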
1480static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001481 int *di, struct qcedev_handle *handle,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001482 uint8_t *k_align_src)
1483{
1484 int err = 0;
1485 int i = 0;
1486 int dst_i = *di;
1487 struct scatterlist sg_src;
1488 uint32_t byteoffset = 0;
1489 uint8_t *user_src = NULL;
1490 uint8_t *k_align_dst = k_align_src;
1491 struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;
1492
1493
1494 if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
1495 byteoffset = areq->cipher_op_req.byteoffset;
1496
1497 user_src = (void __user *)areq->cipher_op_req.vbuf.src[0].vaddr;
1498 if (user_src && __copy_from_user((k_align_src + byteoffset),
1499 (void __user *)user_src,
1500 areq->cipher_op_req.vbuf.src[0].len))
1501 return -EFAULT;
1502
1503 k_align_src += areq->cipher_op_req.vbuf.src[0].len;
1504
1505 for (i = 1; i < areq->cipher_op_req.entries; i++) {
1506 user_src =
1507 (void __user *)areq->cipher_op_req.vbuf.src[i].vaddr;
1508 if (user_src && __copy_from_user(k_align_src,
1509 (void __user *)user_src,
1510 areq->cipher_op_req.vbuf.src[i].len)) {
1511 return -EFAULT;
1512 }
1513 k_align_src += areq->cipher_op_req.vbuf.src[i].len;
1514 }
1515
1516 /* restore src beginning */
1517 k_align_src = k_align_dst;
1518 areq->cipher_op_req.data_len += byteoffset;
1519
1520 areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
1521 areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;
1522
1523 /* In place encryption/decryption */
1524 sg_set_buf(areq->cipher_req.creq.src,
1525 k_align_dst,
1526 areq->cipher_op_req.data_len);
1527 sg_mark_end(areq->cipher_req.creq.src);
1528
1529 areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
1530 areq->cipher_req.creq.info = areq->cipher_op_req.iv;
1531 areq->cipher_op_req.entries = 1;
1532
Mona Hossain087c60b2011-07-20 10:34:57 -07001533 err = submit_req(areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001534
1535 /* copy data to destination buffer*/
1536 creq->data_len -= byteoffset;
1537
1538 while (creq->data_len > 0) {
1539 if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
1540 if (err == 0 && __copy_to_user(
1541 (void __user *)creq->vbuf.dst[dst_i].vaddr,
1542 (k_align_dst + byteoffset),
1543 creq->vbuf.dst[dst_i].len))
1544 return -EFAULT;
1545
1546 k_align_dst += creq->vbuf.dst[dst_i].len +
1547 byteoffset;
1548 creq->data_len -= creq->vbuf.dst[dst_i].len;
1549 dst_i++;
1550 } else {
1551 if (err == 0 && __copy_to_user(
1552 (void __user *)creq->vbuf.dst[dst_i].vaddr,
1553 (k_align_dst + byteoffset),
1554 creq->data_len))
1555 return -EFAULT;
1556
1557 k_align_dst += creq->data_len;
1558 creq->vbuf.dst[dst_i].len -= creq->data_len;
1559 creq->vbuf.dst[dst_i].vaddr += creq->data_len;
1560 creq->data_len = 0;
1561 }
1562 }
1563 *di = dst_i;
1564
1565 return err;
1566};
1567
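/*
 * Top-level cipher path for user virtual buffers: validate the user
 * source/destination addresses, allocate a bounce buffer of up to
 * QCE_MAX_OPER_DATA bytes, and split requests larger than one hardware
 * transfer into repeated calls to qcedev_vbuf_ablk_cipher_max_xfer().
 * The caller's qcedev_cipher_op_req is restored before returning.
 */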
1568static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001569 struct qcedev_handle *handle)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001570{
1571 int err = 0;
1572 int di = 0;
1573 int i = 0;
1574 int j = 0;
1575 int k = 0;
1576 uint32_t byteoffset = 0;
1577 int num_entries = 0;
1578 uint32_t total = 0;
1579 uint32_t len;
1580 uint8_t *k_buf_src = NULL;
1581 uint8_t *k_align_src = NULL;
1582 uint32_t max_data_xfer;
1583 struct qcedev_cipher_op_req *saved_req;
1584 struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;
1585
 1586	/* Verify source addresses */
1587 for (i = 0; i < areq->cipher_op_req.entries; i++)
1588 if (!access_ok(VERIFY_READ,
1589 (void __user *)areq->cipher_op_req.vbuf.src[i].vaddr,
1590 areq->cipher_op_req.vbuf.src[i].len))
1591 return -EFAULT;
1592
 1593	/* Verify destination addresses */
1594 if (areq->cipher_op_req.in_place_op != 1)
1595 for (i = 0; i < areq->cipher_op_req.entries; i++)
 1596			if (!access_ok(VERIFY_WRITE,
1597 (void __user *)areq->cipher_op_req.vbuf.dst[i].vaddr,
1598 areq->cipher_op_req.vbuf.dst[i].len))
1599 return -EFAULT;
1600
1601 if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
1602 byteoffset = areq->cipher_op_req.byteoffset;
1603 k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
1604 GFP_KERNEL);
1605 if (k_buf_src == NULL) {
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07001606		pr_err("%s: Cannot allocate memory for k_buf_src 0x%x\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001607 __func__, (uint32_t)k_buf_src);
1608 return -ENOMEM;
1609 }
1610 k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
1611 CACHE_LINE_SIZE);
1612 max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;
1613
1614 saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
1615 if (saved_req == NULL) {
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07001616		pr_err("%s: Cannot allocate memory for saved_req 0x%x\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001617 __func__, (uint32_t)saved_req);
1618 kfree(k_buf_src);
1619 return -ENOMEM;
1620
1621 }
1622 memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));
1623
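	/*
	 * Requests larger than one hardware transfer are processed in
	 * chunks: an oversized entry is consumed max_data_xfer bytes at a
	 * time, while smaller entries are coalesced until the limit is
	 * reached.
	 */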
1624 if (areq->cipher_op_req.data_len > max_data_xfer) {
1625 struct qcedev_cipher_op_req req;
1626
1627 /* save the original req structure */
1628 memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));
1629
1630 i = 0;
 1631		/* Process at most QCE_MAX_OPER_DATA bytes per transfer */
1632 while ((i < req.entries) && (err == 0)) {
1633 if (creq->vbuf.src[i].len > max_data_xfer) {
1634 creq->vbuf.src[0].len = max_data_xfer;
1635 if (i > 0) {
1636 creq->vbuf.src[0].vaddr =
1637 creq->vbuf.src[i].vaddr;
1638 }
1639
1640 creq->data_len = max_data_xfer;
1641 creq->entries = 1;
1642
1643 err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001644 &di, handle, k_align_src);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001645 if (err < 0) {
1646 kfree(k_buf_src);
1647 kfree(saved_req);
1648 return err;
1649 }
1650
1651 creq->vbuf.src[i].len = req.vbuf.src[i].len -
1652 max_data_xfer;
1653 creq->vbuf.src[i].vaddr =
1654 req.vbuf.src[i].vaddr +
1655 max_data_xfer;
1656 req.vbuf.src[i].vaddr =
1657 creq->vbuf.src[i].vaddr;
1658 req.vbuf.src[i].len = creq->vbuf.src[i].len;
1659
1660 } else {
1661 total = areq->cipher_op_req.byteoffset;
1662 for (j = i; j < req.entries; j++) {
1663 num_entries++;
1664 if ((total + creq->vbuf.src[j].len)
1665 >= max_data_xfer) {
1666 creq->vbuf.src[j].len =
1667 max_data_xfer - total;
1668 total = max_data_xfer;
1669 break;
1670 }
1671 total += creq->vbuf.src[j].len;
1672 }
1673
1674 creq->data_len = total;
1675 if (i > 0)
1676 for (k = 0; k < num_entries; k++) {
1677 creq->vbuf.src[k].len =
1678 creq->vbuf.src[i+k].len;
1679 creq->vbuf.src[k].vaddr =
1680 creq->vbuf.src[i+k].vaddr;
1681 }
1682 creq->entries = num_entries;
1683
1684 i = j;
1685 err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001686 &di, handle, k_align_src);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001687 if (err < 0) {
1688 kfree(k_buf_src);
1689 kfree(saved_req);
1690 return err;
1691 }
1692
1693 num_entries = 0;
1694 areq->cipher_op_req.byteoffset = 0;
1695
1696 creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
1697 + creq->vbuf.src[i].len;
1698 creq->vbuf.src[i].len = req.vbuf.src[i].len -
1699 creq->vbuf.src[i].len;
1700
1701 req.vbuf.src[i].vaddr =
1702 creq->vbuf.src[i].vaddr;
1703 req.vbuf.src[i].len = creq->vbuf.src[i].len;
1704
1705 if (creq->vbuf.src[i].len == 0)
1706 i++;
1707 }
1708
1709 areq->cipher_op_req.byteoffset = 0;
1710 max_data_xfer = QCE_MAX_OPER_DATA;
1711 byteoffset = 0;
1712
1713 } /* end of while ((i < req.entries) && (err == 0)) */
1714 } else
Mona Hossain087c60b2011-07-20 10:34:57 -07001715 err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001716 k_align_src);
1717
1718 /* Restore the original req structure */
1719 for (i = 0; i < saved_req->entries; i++) {
1720 creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
1721 creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
1722 }
1723 for (len = 0, i = 0; len < saved_req->data_len; i++) {
1724 creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
1725 creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
1726 len += saved_req->vbuf.dst[i].len;
1727 }
1728 creq->entries = saved_req->entries;
1729 creq->data_len = saved_req->data_len;
1730 creq->byteoffset = saved_req->byteoffset;
1731
1732 kfree(saved_req);
1733 kfree(k_buf_src);
1734 return err;
1735
1736}
1737
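/*
 * Sanity-check a cipher request from user space: algorithm/mode ranges,
 * key length (or HW key usage when encklen is 0), byteoffset restrictions
 * (CTR mode over vbuf only), the PMEM in-place requirement, and IV length
 * consistency with the selected mode.
 */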
1738static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
1739 struct qcedev_control *podev)
1740{
1741 if ((req->entries == 0) || (req->data_len == 0))
1742 goto error;
1743 if ((req->alg >= QCEDEV_ALG_LAST) ||
1744 (req->mode >= QCEDEV_AES_DES_MODE_LAST))
1745 goto error;
1746 if (req->alg == QCEDEV_ALG_AES) {
1747 if ((req->mode == QCEDEV_AES_MODE_XTS) &&
1748 (!podev->ce_support.aes_xts))
1749 goto error;
1750 /* if intending to use HW key make sure key fields are set
1751 * correctly and HW key is indeed supported in target
1752 */
1753 if (req->encklen == 0) {
1754 int i;
1755 for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++)
1756 if (req->enckey[i])
1757 goto error;
1758 if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
1759 (req->op != QCEDEV_OPER_DEC_NO_KEY))
1760 if (!podev->platform_support.hw_key_support)
1761 goto error;
1762 } else {
1763 if (req->encklen == QCEDEV_AES_KEY_192) {
1764 if (!podev->ce_support.aes_key_192)
1765 goto error;
1766 } else {
1767 /* if not using HW key make sure key
1768 * length is valid
1769 */
1770 if (!((req->encklen == QCEDEV_AES_KEY_128) ||
1771 (req->encklen == QCEDEV_AES_KEY_256)))
1772 goto error;
1773 }
1774 }
1775 }
1776 /* if using a byteoffset, make sure it is CTR mode using vbuf */
1777 if (req->byteoffset) {
1778 if (req->mode != QCEDEV_AES_MODE_CTR)
1779 goto error;
1780 else { /* if using CTR mode make sure not using Pmem */
1781 if (req->use_pmem)
1782 goto error;
1783 }
1784 }
 1785	/* if using PMEM, the operation must be done in place */
1786 if (req->use_pmem) {
1787 if (!req->in_place_op)
1788 goto error;
1789 }
 1790	/* Ensure zero ivlen for ECB mode, and a non-zero ivlen otherwise */
1791 if (req->ivlen != 0) {
1792 if ((req->mode == QCEDEV_AES_MODE_ECB) ||
1793 (req->mode == QCEDEV_DES_MODE_ECB))
1794 goto error;
1795 } else {
1796 if ((req->mode != QCEDEV_AES_MODE_ECB) &&
1797 (req->mode != QCEDEV_DES_MODE_ECB))
1798 goto error;
1799 }
1800
1801 return 0;
1802error:
1803 return -EINVAL;
1804
1805}
1806
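/*
 * Sanity-check a hash/CMAC request from user space: the algorithm must be
 * supported (CMAC only when the CE reports it), and the request must
 * carry at least one entry and a non-zero data length.
 */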
1807static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
1808 struct qcedev_control *podev)
1809{
1810 if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
1811 (!podev->ce_support.cmac))
1812 goto sha_error;
1813
1814 if ((req->entries == 0) || (req->data_len == 0))
1815 goto sha_error;
1816
1817 if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST)
1818 goto sha_error;
1819
1820 return 0;
1821sha_error:
1822 return -EINVAL;
1823}
1824
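/*
 * Main ioctl dispatcher: cipher requests are routed to the PMEM or vbuf
 * path, hash/CMAC requests to the init/update/final helpers, and results
 * are copied back into the user's request structure on success.
 */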
1825static long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
1826{
1827 int err = 0;
Mona Hossain087c60b2011-07-20 10:34:57 -07001828 struct qcedev_handle *handle;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001829 struct qcedev_control *podev;
1830 struct qcedev_async_req qcedev_areq;
1831 struct qcedev_stat *pstat;
1832
Mona Hossain087c60b2011-07-20 10:34:57 -07001833 handle = file->private_data;
1834 podev = handle->cntl;
1835 qcedev_areq.handle = handle;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001836 if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07001837 pr_err("%s: invalid handle %p\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001838 __func__, podev);
1839 return -ENOENT;
1840 }
1841
1842 /* Verify user arguments. */
1843 if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC)
1844 return -ENOTTY;
1845
1846 init_completion(&qcedev_areq.complete);
1847 pstat = &_qcedev_stat[podev->pdev->id];
1848
1849 switch (cmd) {
1850 case QCEDEV_IOCTL_LOCK_CE:
Mona Hossain650c22c2011-07-19 09:54:19 -07001851 if (podev->platform_support.ce_shared)
1852 err = qcedev_lock_ce(podev);
1853 else
1854 err = -ENOTTY;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001855 break;
1856 case QCEDEV_IOCTL_UNLOCK_CE:
Mona Hossain650c22c2011-07-19 09:54:19 -07001857 if (podev->platform_support.ce_shared)
1858 err = qcedev_unlock_ce(podev);
1859 else
1860 err = -ENOTTY;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001861 break;
1862 case QCEDEV_IOCTL_ENC_REQ:
1863 case QCEDEV_IOCTL_DEC_REQ:
1864 if (!access_ok(VERIFY_WRITE, (void __user *)arg,
1865 sizeof(struct qcedev_cipher_op_req)))
1866 return -EFAULT;
1867
1868 if (__copy_from_user(&qcedev_areq.cipher_op_req,
1869 (void __user *)arg,
1870 sizeof(struct qcedev_cipher_op_req)))
1871 return -EFAULT;
1872 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_CIPHER;
1873
1874 if (qcedev_check_cipher_params(&qcedev_areq.cipher_op_req,
1875 podev))
1876 return -EINVAL;
1877
Ramesh Masavarapua63ff1e2011-10-20 10:51:25 -07001878 if (qcedev_areq.cipher_op_req.use_pmem)
Mona Hossain087c60b2011-07-20 10:34:57 -07001879 err = qcedev_pmem_ablk_cipher(&qcedev_areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001880 else
Mona Hossain087c60b2011-07-20 10:34:57 -07001881 err = qcedev_vbuf_ablk_cipher(&qcedev_areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001882 if (err)
1883 return err;
1884 if (__copy_to_user((void __user *)arg,
1885 &qcedev_areq.cipher_op_req,
1886 sizeof(struct qcedev_cipher_op_req)))
1887 return -EFAULT;
1888 break;
1889
1890 case QCEDEV_IOCTL_SHA_INIT_REQ:
1891
1892 if (!access_ok(VERIFY_WRITE, (void __user *)arg,
1893 sizeof(struct qcedev_sha_op_req)))
1894 return -EFAULT;
1895
1896 if (__copy_from_user(&qcedev_areq.sha_op_req,
1897 (void __user *)arg,
1898 sizeof(struct qcedev_sha_op_req)))
1899 return -EFAULT;
1900 if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
1901 return -EINVAL;
1902 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
Mona Hossain087c60b2011-07-20 10:34:57 -07001903 err = qcedev_hash_init(&qcedev_areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001904 if (err)
1905 return err;
1906 if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
1907 sizeof(struct qcedev_sha_op_req)))
1908 return -EFAULT;
1909 break;
1910 case QCEDEV_IOCTL_GET_CMAC_REQ:
1911 if (!podev->ce_support.cmac)
1912 return -ENOTTY;
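		/* fall through: CMAC requests share the SHA update path below */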
1913 case QCEDEV_IOCTL_SHA_UPDATE_REQ:
1914 if (!access_ok(VERIFY_WRITE, (void __user *)arg,
1915 sizeof(struct qcedev_sha_op_req)))
1916 return -EFAULT;
1917
1918 if (__copy_from_user(&qcedev_areq.sha_op_req,
1919 (void __user *)arg,
1920 sizeof(struct qcedev_sha_op_req)))
1921 return -EFAULT;
1922 if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
1923 return -EINVAL;
1924 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
1925
1926 if (qcedev_areq.sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
Mona Hossain087c60b2011-07-20 10:34:57 -07001927 err = qcedev_hash_cmac(&qcedev_areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001928 if (err)
1929 return err;
1930 } else {
Mona Hossain087c60b2011-07-20 10:34:57 -07001931 err = qcedev_hash_update(&qcedev_areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001932 if (err)
1933 return err;
1934 }
1935
1936 memcpy(&qcedev_areq.sha_op_req.digest[0],
Mona Hossain087c60b2011-07-20 10:34:57 -07001937 &handle->sha_ctxt.digest[0],
1938 handle->sha_ctxt.diglen);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001939 if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
1940 sizeof(struct qcedev_sha_op_req)))
1941 return -EFAULT;
1942 break;
1943
1944 case QCEDEV_IOCTL_SHA_FINAL_REQ:
1945
1946 if (!access_ok(VERIFY_WRITE, (void __user *)arg,
1947 sizeof(struct qcedev_sha_op_req)))
1948 return -EFAULT;
1949
1950 if (__copy_from_user(&qcedev_areq.sha_op_req,
1951 (void __user *)arg,
1952 sizeof(struct qcedev_sha_op_req)))
1953 return -EFAULT;
1954 if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
1955 return -EINVAL;
1956 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
Mona Hossain087c60b2011-07-20 10:34:57 -07001957 err = qcedev_hash_final(&qcedev_areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001958 if (err)
1959 return err;
Mona Hossain087c60b2011-07-20 10:34:57 -07001960 qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001961 memcpy(&qcedev_areq.sha_op_req.digest[0],
Mona Hossain087c60b2011-07-20 10:34:57 -07001962 &handle->sha_ctxt.digest[0],
1963 handle->sha_ctxt.diglen);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001964 if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
1965 sizeof(struct qcedev_sha_op_req)))
1966 return -EFAULT;
1967 break;
1968
1969 case QCEDEV_IOCTL_GET_SHA_REQ:
1970
1971 if (!access_ok(VERIFY_WRITE, (void __user *)arg,
1972 sizeof(struct qcedev_sha_op_req)))
1973 return -EFAULT;
1974
1975 if (__copy_from_user(&qcedev_areq.sha_op_req,
1976 (void __user *)arg,
1977 sizeof(struct qcedev_sha_op_req)))
1978 return -EFAULT;
1979 if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
1980 return -EINVAL;
1981 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
Mona Hossain087c60b2011-07-20 10:34:57 -07001982 qcedev_hash_init(&qcedev_areq, handle);
1983 err = qcedev_hash_update(&qcedev_areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001984 if (err)
1985 return err;
Mona Hossain087c60b2011-07-20 10:34:57 -07001986 err = qcedev_hash_final(&qcedev_areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001987 if (err)
1988 return err;
Mona Hossain087c60b2011-07-20 10:34:57 -07001989 qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001990 memcpy(&qcedev_areq.sha_op_req.digest[0],
Mona Hossain087c60b2011-07-20 10:34:57 -07001991 &handle->sha_ctxt.digest[0],
1992 handle->sha_ctxt.diglen);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001993 if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
1994 sizeof(struct qcedev_sha_op_req)))
1995 return -EFAULT;
1996 break;
1997
1998 default:
1999 return -ENOTTY;
2000 }
2001
2002 return err;
2003}
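/*
 * Illustrative user-space usage (sketch only -- the device node name, the
 * QCEDEV_ALG_SHA256 constant and the source-buffer field layout are
 * assumptions based on the qcedev UAPI header, not on this file):
 *
 *	int fd = open("/dev/qce", O_RDWR);
 *	struct qcedev_sha_op_req req = {0};
 *
 *	req.alg = QCEDEV_ALG_SHA256;	// assumed algorithm constant
 *	req.entries = 1;
 *	req.data[0].vaddr = buf;	// assumed field names
 *	req.data[0].len = buf_len;
 *	req.data_len = buf_len;
 *	if (ioctl(fd, QCEDEV_IOCTL_GET_SHA_REQ, &req) == 0)
 *		use_digest(req.digest, req.diglen);	// digest copied back by the driver
 */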
2004
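/*
 * Platform driver probe: bind one CE instance, copy the platform support
 * flags (shared CE, HW key, bus scaling table), open the qce back end,
 * register an optional bus-scale client and finally the misc device
 * through which user space reaches qcedev_ioctl().
 */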
2005static int qcedev_probe(struct platform_device *pdev)
2006{
2007 void *handle = NULL;
2008 int rc = 0;
2009 struct qcedev_control *podev;
2010 struct msm_ce_hw_support *platform_support;
2011
2012 if (pdev->id >= MAX_QCE_DEVICE) {
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07002013 pr_err("%s: device id %d exceeds allowed %d\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002014 __func__, pdev->id, MAX_QCE_DEVICE);
2015 return -ENOENT;
2016 }
2017 podev = &qce_dev[pdev->id];
2018
2019 platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data;
2020 podev->platform_support.ce_shared = platform_support->ce_shared;
2021 podev->platform_support.shared_ce_resource =
2022 platform_support->shared_ce_resource;
2023 podev->platform_support.hw_key_support =
2024 platform_support->hw_key_support;
Ramesh Masavarapu49259682011-12-02 14:00:18 -08002025 podev->platform_support.bus_scale_table =
2026 platform_support->bus_scale_table;
Mona Hossain650c22c2011-07-19 09:54:19 -07002027 podev->ce_lock_count = 0;
Ramesh Masavarapu49259682011-12-02 14:00:18 -08002028 podev->high_bw_req_count = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002029 INIT_LIST_HEAD(&podev->ready_commands);
2030 podev->active_command = NULL;
2031
2032 spin_lock_init(&podev->lock);
2033
2034 tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);
2035
2036 /* open qce */
2037 handle = qce_open(pdev, &rc);
2038 if (handle == NULL) {
2039 platform_set_drvdata(pdev, NULL);
2040 return rc;
2041 }
2042
2043 podev->qce = handle;
2044 podev->pdev = pdev;
2045 platform_set_drvdata(pdev, podev);
2046 qce_hw_support(podev->qce, &podev->ce_support);
Ramesh Masavarapu49259682011-12-02 14:00:18 -08002047
2048 if (podev->platform_support.bus_scale_table != NULL) {
2049 podev->bus_scale_handle =
2050 msm_bus_scale_register_client(
2051 (struct msm_bus_scale_pdata *)
2052 podev->platform_support.bus_scale_table);
2053 if (!podev->bus_scale_handle) {
 2054			pr_err("%s: unable to register bus scale client\n",
 2055				__func__);
2056 rc = -ENOMEM;
2057 goto err;
2058 }
2059 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002060 rc = misc_register(&podev->miscdevice);
2061
2062 if (rc >= 0)
2063 return 0;
Ramesh Masavarapu49259682011-12-02 14:00:18 -08002064 else
2065 if (podev->platform_support.bus_scale_table != NULL)
2066 msm_bus_scale_unregister_client(
2067 podev->bus_scale_handle);
2068err:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002069
2070 if (handle)
2071 qce_close(handle);
2072 platform_set_drvdata(pdev, NULL);
2073 podev->qce = NULL;
2074 podev->pdev = NULL;
2075 return rc;
2076};
2077
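/*
 * Platform driver remove: tear down in reverse order of probe -- close the
 * qce back end, drop the bus-scale client, deregister the misc device and
 * kill the completion tasklet.
 */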
2078static int qcedev_remove(struct platform_device *pdev)
2079{
2080 struct qcedev_control *podev;
2081
2082 podev = platform_get_drvdata(pdev);
2083 if (!podev)
2084 return 0;
2085 if (podev->qce)
2086 qce_close(podev->qce);
2087
Ramesh Masavarapu49259682011-12-02 14:00:18 -08002088 if (podev->platform_support.bus_scale_table != NULL)
2089 msm_bus_scale_unregister_client(podev->bus_scale_handle);
2090
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002091 if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
2092 misc_deregister(&podev->miscdevice);
2093 tasklet_kill(&podev->done_tasklet);
2094 return 0;
2095};
2096
2097static struct platform_driver qcedev_plat_driver = {
2098 .probe = qcedev_probe,
2099 .remove = qcedev_remove,
2100 .driver = {
2101 .name = "qce",
2102 .owner = THIS_MODULE,
2103 },
2104};
2105
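/*
 * Format the encryption/decryption success and failure counters of one
 * device instance into _debug_read_buf for its debugfs "stats-N" file.
 */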
2106static int _disp_stats(int id)
2107{
2108 struct qcedev_stat *pstat;
2109 int len = 0;
2110
2111 pstat = &_qcedev_stat[id];
2112 len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
2113 "\nQualcomm QCE dev driver %d Statistics:\n",
2114 id + 1);
2115
2116 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
2117 " Encryption operation success : %d\n",
2118 pstat->qcedev_enc_success);
2119 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
2120 " Encryption operation fail : %d\n",
2121 pstat->qcedev_enc_fail);
2122 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
2123 " Decryption operation success : %d\n",
2124 pstat->qcedev_dec_success);
2125
2126 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
 2127		" Decryption operation fail : %d\n",
2128 pstat->qcedev_dec_fail);
2129
2130 return len;
2131}
2132
2133static int _debug_stats_open(struct inode *inode, struct file *file)
2134{
2135 file->private_data = inode->i_private;
2136 return 0;
2137}
2138
2139static ssize_t _debug_stats_read(struct file *file, char __user *buf,
2140 size_t count, loff_t *ppos)
2141{
2142 int rc = -EINVAL;
2143 int qcedev = *((int *) file->private_data);
2144 int len;
2145
2146 len = _disp_stats(qcedev);
2147
 2148	rc = simple_read_from_buffer((void __user *) buf, count,
 2149			ppos, (void *) _debug_read_buf, len);
2150
2151 return rc;
2152}
2153
2154static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
2155 size_t count, loff_t *ppos)
2156{
2157
2158 int qcedev = *((int *) file->private_data);
2159
2160 memset((char *)&_qcedev_stat[qcedev], 0, sizeof(struct qcedev_stat));
2161 return count;
2162};
2163
2164static const struct file_operations _debug_stats_ops = {
2165 .open = _debug_stats_open,
2166 .read = _debug_stats_read,
2167 .write = _debug_stats_write,
2168};
2169
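/*
 * Create the debugfs directory "qcedev" with one "stats-N" file per
 * possible CE device; reading a file dumps the counters, writing to it
 * clears them.
 */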
2170static int _qcedev_debug_init(void)
2171{
2172 int rc;
2173 char name[DEBUG_MAX_FNAME];
2174 int i;
2175 struct dentry *dent;
2176
2177 _debug_dent = debugfs_create_dir("qcedev", NULL);
2178 if (IS_ERR(_debug_dent)) {
2179 pr_err("qcedev debugfs_create_dir fail, error %ld\n",
2180 PTR_ERR(_debug_dent));
2181 return PTR_ERR(_debug_dent);
2182 }
2183
2184 for (i = 0; i < MAX_QCE_DEVICE; i++) {
2185 snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", i+1);
2186 _debug_qcedev[i] = i;
2187 dent = debugfs_create_file(name, 0644, _debug_dent,
2188 &_debug_qcedev[i], &_debug_stats_ops);
2189 if (dent == NULL) {
 2190			pr_err("qcedev debugfs_create_file fail for %s\n",
 2191				name);
 2192			rc = -ENOMEM;
2193 goto err;
2194 }
2195 }
2196 return 0;
2197err:
2198 debugfs_remove_recursive(_debug_dent);
2199 return rc;
2200}
2201
2202static int qcedev_init(void)
2203{
2204 int rc;
2205
2206 rc = _qcedev_debug_init();
2207 if (rc)
2208 return rc;
2209 return platform_driver_register(&qcedev_plat_driver);
2210}
2211
2212static void qcedev_exit(void)
2213{
2214 debugfs_remove_recursive(_debug_dent);
2215 platform_driver_unregister(&qcedev_plat_driver);
2216}
2217
2218MODULE_LICENSE("GPL v2");
2219MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
2220MODULE_DESCRIPTION("Qualcomm DEV Crypto driver");
Ramesh Masavarapu49259682011-12-02 14:00:18 -08002221MODULE_VERSION("1.25");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002222
2223module_init(qcedev_init);
2224module_exit(qcedev_exit);