/* Qualcomm CE device driver.
 *
 * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/mman.h>
#include <linux/android_pmem.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/hash.h>
#include <linux/platform_data/qcom_crypto_device.h>
#include <mach/scm.h>
#include <mach/msm_bus.h>
#include <linux/qcedev.h>
#include "qce.h"


#define CACHE_LINE_SIZE 32
#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE

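/* standard initialization vector for SHA-1, source: FIPS 180-2 */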
static uint8_t _std_init_vector_sha1_uint8[] = {
	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
	0xC3, 0xD2, 0xE1, 0xF0
};
/* standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha256_uint8[] = {
	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
};

enum qcedev_crypto_oper_type {
	QCEDEV_CRYPTO_OPER_CIPHER = 0,
	QCEDEV_CRYPTO_OPER_SHA = 1,
	QCEDEV_CRYPTO_OPER_LAST
};

struct qcedev_handle;

struct qcedev_cipher_req {
	struct ablkcipher_request creq;
	void *cookie;
};

struct qcedev_sha_req {
	struct ahash_request sreq;
	void *cookie;
};

struct qcedev_sha_ctxt {
	uint32_t auth_data[4];
	uint8_t digest[QCEDEV_MAX_SHA_DIGEST];
	uint32_t diglen;
	uint8_t trailing_buf[64];
	uint32_t trailing_buf_len;
	uint8_t first_blk;
	uint8_t last_blk;
	uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
};

struct qcedev_async_req {
	struct list_head list;
	struct completion complete;
	enum qcedev_crypto_oper_type op_type;
	union {
		struct qcedev_cipher_op_req cipher_op_req;
		struct qcedev_sha_op_req sha_op_req;
	};
	union {
		struct qcedev_cipher_req cipher_req;
		struct qcedev_sha_req sha_req;
	};
	struct qcedev_handle *handle;
	int err;
};

static DEFINE_MUTEX(send_cmd_lock);
static DEFINE_MUTEX(sent_bw_req);
/**********************************************************************
 * Register ourselves as a misc device to be able to access the dev driver
 * from userspace. */


#define QCEDEV_DEV	"qcedev"

struct qcedev_control {

	/* CE features supported by platform */
	struct msm_ce_hw_support platform_support;

	uint32_t ce_lock_count;
	uint32_t high_bw_req_count;

	/* CE features/algorithms supported by HW engine*/
	struct ce_hw_support ce_support;

	uint32_t bus_scale_handle;

	/* misc device */
	struct miscdevice miscdevice;

	/* qce handle */
	void *qce;

	/* platform device */
	struct platform_device *pdev;

	unsigned magic;

	struct list_head ready_commands;
	struct qcedev_async_req *active_command;
	spinlock_t lock;
	struct tasklet_struct done_tasklet;
};

struct qcedev_handle {
	/* qcedev control handle */
	struct qcedev_control *cntl;
	/* qce internal sha context*/
	struct qcedev_sha_ctxt sha_ctxt;
};

/*-------------------------------------------------------------------------
* Resource Locking Service
* ------------------------------------------------------------------------*/
#define QCEDEV_CMD_ID		1
#define QCEDEV_CE_LOCK_CMD	1
#define QCEDEV_CE_UNLOCK_CMD	0
#define NUM_RETRY		1000
#define CE_BUSY			55

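/*
 * Issue an SCM call into the secure world (TZ) to lock or unlock the
 * shared crypto engine resource. When CONFIG_MSM_SCM is not set the
 * call is compiled out and always reports success.
 */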
static int qcedev_scm_cmd(int resource, int cmd, int *response)
{
#ifdef CONFIG_MSM_SCM

	struct {
		int resource;
		int cmd;
	} cmd_buf;

	cmd_buf.resource = resource;
	cmd_buf.cmd = cmd;

	return scm_call(SCM_SVC_TZ, QCEDEV_CMD_ID, &cmd_buf,
		sizeof(cmd_buf), response, sizeof(*response));

#else
	return 0;
#endif
}

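/*
 * Vote for high or low bus bandwidth on behalf of this device. A simple
 * reference count (high_bw_req_count) ensures the bus-scale request is
 * only issued for the first high-bandwidth user and the last release.
 */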
static void qcedev_ce_high_bw_req(struct qcedev_control *podev,
							bool high_bw_req)
{
	int ret = 0;

	mutex_lock(&sent_bw_req);
	if (high_bw_req) {
		if (podev->high_bw_req_count == 0)
			ret = msm_bus_scale_client_update_request(
					podev->bus_scale_handle, 1);
		if (ret)
			pr_err("%s Unable to set to high bandwidth\n",
							__func__);
		podev->high_bw_req_count++;
	} else {
		if (podev->high_bw_req_count == 1)
			ret = msm_bus_scale_client_update_request(
					podev->bus_scale_handle, 0);
		if (ret)
			pr_err("%s Unable to set to low bandwidth\n",
							__func__);
		podev->high_bw_req_count--;
	}
	mutex_unlock(&sent_bw_req);
}

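/*
 * When the crypto engine is shared with the secure world, every request
 * must be bracketed by a TZ lock/unlock. qcedev_lock_ce() retries up to
 * NUM_RETRY times while TZ reports CE_BUSY; both calls keep a lock count
 * so only the outermost caller actually talks to TZ.
 */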
static int qcedev_unlock_ce(struct qcedev_control *podev)
{
	int ret = 0;

	mutex_lock(&send_cmd_lock);
	if (podev->ce_lock_count == 1) {
		int response = 0;

		if (qcedev_scm_cmd(podev->platform_support.shared_ce_resource,
				QCEDEV_CE_UNLOCK_CMD, &response)) {
			pr_err("Failed to release CE lock\n");
			ret = -EIO;
		}
	}
	if (ret == 0) {
		if (podev->ce_lock_count)
			podev->ce_lock_count--;
		else {
			/* We should never be here */
			ret = -EIO;
			pr_err("CE hardware is already unlocked\n");
		}
	}
	mutex_unlock(&send_cmd_lock);

	return ret;
}

static int qcedev_lock_ce(struct qcedev_control *podev)
{
	int ret = 0;

	mutex_lock(&send_cmd_lock);
	if (podev->ce_lock_count == 0) {
		int response = -CE_BUSY;
		int i = 0;

		do {
			if (qcedev_scm_cmd(
				podev->platform_support.shared_ce_resource,
				QCEDEV_CE_LOCK_CMD, &response)) {
				response = -EINVAL;
				break;
			}
		} while ((response == -CE_BUSY) && (i++ < NUM_RETRY));

		if ((response == -CE_BUSY) && (i >= NUM_RETRY)) {
			ret = -EUSERS;
		} else {
			if (response < 0)
				ret = -EINVAL;
		}
	}
	if (ret == 0)
		podev->ce_lock_count++;
	mutex_unlock(&send_cmd_lock);
	return ret;
}

#define QCEDEV_MAGIC 0x56434544 /* "qced" */

static long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
static int qcedev_open(struct inode *inode, struct file *file);
static int qcedev_release(struct inode *inode, struct file *file);
static int start_cipher_req(struct qcedev_control *podev);
static int start_sha_req(struct qcedev_control *podev);

static const struct file_operations qcedev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qcedev_ioctl,
	.open = qcedev_open,
	.release = qcedev_release,
};

static struct qcedev_control qce_dev[] = {
	{
		.miscdevice = {
			.minor = MISC_DYNAMIC_MINOR,
			.name = "qce",
			.fops = &qcedev_fops,
		},
		.magic = QCEDEV_MAGIC,
	},
};

#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
#define DEBUG_MAX_FNAME  16
#define DEBUG_MAX_RW_BUF 1024

struct qcedev_stat {
	u32 qcedev_dec_success;
	u32 qcedev_dec_fail;
	u32 qcedev_enc_success;
	u32 qcedev_enc_fail;
	u32 qcedev_sha_success;
	u32 qcedev_sha_fail;
};

static struct qcedev_stat _qcedev_stat[MAX_QCE_DEVICE];
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static int _debug_qcedev[MAX_QCE_DEVICE];

static struct qcedev_control *qcedev_minor_to_control(unsigned n)
{
	int i;

	for (i = 0; i < MAX_QCE_DEVICE; i++) {
		if (qce_dev[i].miscdevice.minor == n)
			return &qce_dev[i];
	}
	return NULL;
}

static int qcedev_open(struct inode *inode, struct file *file)
{
	struct qcedev_handle *handle;
	struct qcedev_control *podev;

	podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
	if (podev == NULL) {
		pr_err("%s: no such device %d\n", __func__,
				MINOR(inode->i_rdev));
		return -ENOENT;
	}

	handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
	if (handle == NULL) {
		pr_err("%s: Failed to allocate memory for handle\n",
				__func__);
		return -ENOMEM;
	}

	handle->cntl = podev;
	file->private_data = handle;
	if (podev->platform_support.bus_scale_table != NULL)
		qcedev_ce_high_bw_req(podev, true);
	return 0;
}

static int qcedev_release(struct inode *inode, struct file *file)
{
	struct qcedev_control *podev;
	struct qcedev_handle *handle;

	handle = file->private_data;
	podev = handle->cntl;
	if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
		pr_err("%s: invalid handle %p\n",
					__func__, podev);
	}
	kzfree(handle);
	file->private_data = NULL;
	if (podev->platform_support.bus_scale_table != NULL)
		qcedev_ce_high_bw_req(podev, false);
	return 0;
}

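/*
 * Tasklet handler: completes the request that just finished on the CE and
 * immediately starts the next queued request, if any. A request that fails
 * to start is completed right away and the queue is drained again.
 */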
static void req_done(unsigned long data)
{
	struct qcedev_control *podev = (struct qcedev_control *)data;
	struct qcedev_async_req *areq;
	unsigned long flags = 0;
	struct qcedev_async_req *new_req = NULL;
	int ret = 0;

	spin_lock_irqsave(&podev->lock, flags);
	areq = podev->active_command;
	podev->active_command = NULL;

again:
	if (!list_empty(&podev->ready_commands)) {
		new_req = container_of(podev->ready_commands.next,
						struct qcedev_async_req, list);
		list_del(&new_req->list);
		podev->active_command = new_req;
		new_req->err = 0;
		if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev);
		else
			ret = start_sha_req(podev);
	}

	spin_unlock_irqrestore(&podev->lock, flags);

	if (areq)
		complete(&areq->complete);

	if (new_req && ret) {
		complete(&new_req->complete);
		spin_lock_irqsave(&podev->lock, flags);
		podev->active_command = NULL;
		areq = NULL;
		ret = 0;
		new_req = NULL;
		goto again;
	}

	return;
}

static void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
	unsigned char *authdata, int ret)
{
	struct qcedev_sha_req *areq;
	struct qcedev_control *pdev;
	struct qcedev_handle *handle;

	uint32_t *auth32 = (uint32_t *)authdata;

	areq = (struct qcedev_sha_req *) cookie;
	handle = (struct qcedev_handle *) areq->cookie;
	pdev = handle->cntl;

	if (digest)
		memcpy(&handle->sha_ctxt.digest[0], digest, 32);

	if (authdata) {
		handle->sha_ctxt.auth_data[0] = auth32[0];
		handle->sha_ctxt.auth_data[1] = auth32[1];
		handle->sha_ctxt.auth_data[2] = auth32[2];
		handle->sha_ctxt.auth_data[3] = auth32[3];
	}

	tasklet_schedule(&pdev->done_tasklet);
};


static void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
	unsigned char *iv, int ret)
{
	struct qcedev_cipher_req *areq;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;

	areq = (struct qcedev_cipher_req *) cookie;
	handle = (struct qcedev_handle *) areq->cookie;
	podev = handle->cntl;
	qcedev_areq = podev->active_command;

	if (iv)
		memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
					qcedev_areq->cipher_op_req.ivlen);
	tasklet_schedule(&podev->done_tasklet);
};

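/*
 * Translate the active user-space cipher request (algorithm, mode,
 * direction, key, IV, pmem vs. virtual buffers) into a qce_req and hand it
 * to the QCE driver. A zero-length key is only accepted for the *_NO_KEY
 * operations or when the platform provides a hardware key.
 */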
static int start_cipher_req(struct qcedev_control *podev)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_req creq;
	int ret = 0;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;

	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
	creq.use_pmem = qcedev_areq->cipher_op_req.use_pmem;
	if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM)
		creq.pmem = &qcedev_areq->cipher_op_req.pmem;
	else
		creq.pmem = NULL;

	switch (qcedev_areq->cipher_op_req.alg) {
	case QCEDEV_ALG_DES:
		creq.alg = CIPHER_ALG_DES;
		break;
	case QCEDEV_ALG_3DES:
		creq.alg = CIPHER_ALG_3DES;
		break;
	case QCEDEV_ALG_AES:
		creq.alg = CIPHER_ALG_AES;
		break;
	default:
		return -EINVAL;
	};

	switch (qcedev_areq->cipher_op_req.mode) {
	case QCEDEV_AES_MODE_CBC:
	case QCEDEV_DES_MODE_CBC:
		creq.mode = QCE_MODE_CBC;
		break;
	case QCEDEV_AES_MODE_ECB:
	case QCEDEV_DES_MODE_ECB:
		creq.mode = QCE_MODE_ECB;
		break;
	case QCEDEV_AES_MODE_CTR:
		creq.mode = QCE_MODE_CTR;
		break;
	case QCEDEV_AES_MODE_XTS:
		creq.mode = QCE_MODE_XTS;
		break;
	default:
		return -EINVAL;
	};

	if ((creq.alg == CIPHER_ALG_AES) &&
		(creq.mode == QCE_MODE_CTR)) {
		creq.dir = QCE_ENCRYPT;
	} else {
		if (QCEDEV_OPER_ENC == qcedev_areq->cipher_op_req.op)
			creq.dir = QCE_ENCRYPT;
		else
			creq.dir = QCE_DECRYPT;
	}

	creq.iv = &qcedev_areq->cipher_op_req.iv[0];
	creq.ivsize = qcedev_areq->cipher_op_req.ivlen;

	creq.enckey = &qcedev_areq->cipher_op_req.enckey[0];
	creq.encklen = qcedev_areq->cipher_op_req.encklen;

	creq.cryptlen = qcedev_areq->cipher_op_req.data_len;

	if (qcedev_areq->cipher_op_req.encklen == 0) {
		if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
			|| (qcedev_areq->cipher_op_req.op ==
				QCEDEV_OPER_DEC_NO_KEY))
			creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
		else {
			int i;

			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
				if (qcedev_areq->cipher_op_req.enckey[i] != 0)
					break;
			}

			if ((podev->platform_support.hw_key_support == 1) &&
						(i == QCEDEV_MAX_KEY_SIZE))
				creq.op = QCE_REQ_ABLK_CIPHER;
			else {
				ret = -EINVAL;
				goto unsupported;
			}
		}
	} else {
		creq.op = QCE_REQ_ABLK_CIPHER;
	}

	creq.qce_cb = qcedev_cipher_req_cb;
	creq.areq = (void *)&qcedev_areq->cipher_req;

	ret = qce_ablk_cipher_req(podev->qce, &creq);
unsupported:
	if (ret)
		qcedev_areq->err = -ENXIO;
	else
		qcedev_areq->err = 0;
	return ret;
};

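/*
 * Build a qce_sha_req from the active hash/HMAC/CMAC request. For SHA and
 * HMAC the intermediate digest, auth_data counters and first/last block
 * flags are carried in the per-handle sha_ctxt so a digest can be computed
 * incrementally across several calls.
 */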
static int start_sha_req(struct qcedev_control *podev)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_sha_req sreq;
	int ret = 0;
	struct qcedev_handle *handle;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	handle = qcedev_areq->handle;

	switch (qcedev_areq->sha_op_req.alg) {
	case QCEDEV_ALG_SHA1:
		sreq.alg = QCE_HASH_SHA1;
		break;
	case QCEDEV_ALG_SHA256:
		sreq.alg = QCE_HASH_SHA256;
		break;
	case QCEDEV_ALG_SHA1_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA1_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];

		} else {
			sreq.alg = QCE_HASH_SHA1;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_SHA256_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA256_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];

		} else {
			sreq.alg = QCE_HASH_SHA256;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_AES_CMAC:
		sreq.alg = QCE_HASH_AES_CMAC;
		sreq.authkey = &handle->sha_ctxt.authkey[0];
		sreq.authklen = qcedev_areq->sha_op_req.authklen;
		break;
	default:
		break;
	};

	qcedev_areq->sha_req.cookie = handle;

	sreq.qce_cb = qcedev_sha_req_cb;
	if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
		sreq.auth_data[0] = handle->sha_ctxt.auth_data[0];
		sreq.auth_data[1] = handle->sha_ctxt.auth_data[1];
		sreq.auth_data[2] = handle->sha_ctxt.auth_data[2];
		sreq.auth_data[3] = handle->sha_ctxt.auth_data[3];
		sreq.digest = &handle->sha_ctxt.digest[0];
		sreq.first_blk = handle->sha_ctxt.first_blk;
		sreq.last_blk = handle->sha_ctxt.last_blk;
	}
	sreq.size = qcedev_areq->sha_req.sreq.nbytes;
	sreq.src = qcedev_areq->sha_req.sreq.src;
	sreq.areq = (void *)&qcedev_areq->sha_req;

	ret = qce_process_sha_req(podev->qce, &sreq);

	if (ret)
		qcedev_areq->err = -ENXIO;
	else
		qcedev_areq->err = 0;
	return ret;
};

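/*
 * Queue one asynchronous request on the device and block until its
 * completion fires. If the CE is shared with TZ, the hardware is locked
 * around the operation. Per-device success/failure statistics are updated
 * before returning the request's error status.
 */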
static int submit_req(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle)
{
	struct qcedev_control *podev;
	unsigned long flags = 0;
	int ret = 0;
	struct qcedev_stat *pstat;

	qcedev_areq->err = 0;
	podev = handle->cntl;

	if (podev->platform_support.ce_shared) {
		ret = qcedev_lock_ce(podev);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&podev->lock, flags);

	if (podev->active_command == NULL) {
		podev->active_command = qcedev_areq;
		if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev);
		else
			ret = start_sha_req(podev);
	} else {
		list_add_tail(&qcedev_areq->list, &podev->ready_commands);
	}

	if (ret != 0)
		podev->active_command = NULL;

	spin_unlock_irqrestore(&podev->lock, flags);

	if (ret == 0)
		wait_for_completion(&qcedev_areq->complete);

	if (podev->platform_support.ce_shared)
		ret = qcedev_unlock_ce(podev);

	if (ret)
		qcedev_areq->err = -EIO;

	pstat = &_qcedev_stat[podev->pdev->id];
	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
		switch (qcedev_areq->cipher_op_req.op) {
		case QCEDEV_OPER_DEC:
			if (qcedev_areq->err)
				pstat->qcedev_dec_fail++;
			else
				pstat->qcedev_dec_success++;
			break;
		case QCEDEV_OPER_ENC:
			if (qcedev_areq->err)
				pstat->qcedev_enc_fail++;
			else
				pstat->qcedev_enc_success++;
			break;
		default:
			break;
		};
	} else {
		if (qcedev_areq->err)
			pstat->qcedev_sha_fail++;
		else
			pstat->qcedev_sha_success++;
	}

	return qcedev_areq->err;
}

static int qcedev_sha_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt;

	memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
	sha_ctxt->first_blk = 1;

	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
		memcpy(&sha_ctxt->digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		sha_ctxt->diglen = SHA1_DIGEST_SIZE;
	} else {
		if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
			memcpy(&sha_ctxt->digest[0],
				&_std_init_vector_sha256_uint8[0],
				SHA256_DIGEST_SIZE);
			sha_ctxt->diglen = SHA256_DIGEST_SIZE;
		}
	}
	return 0;
}

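/*
 * Handle one hash update of at most QCE_MAX_OPER_DATA bytes. The user
 * buffers are copied into a cache-line aligned kernel buffer, prefixed
 * with any trailing bytes left over from the previous update; whatever
 * does not fill a complete SHA block is saved back into trailing_buf
 * for the next update or the final call.
 */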
static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	int i = 0;
	struct scatterlist sg_src[2];
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	uint32_t sha_pad_len = 0;
	uint32_t trailing_buf_len = 0;
	uint32_t t_buf = handle->sha_ctxt.trailing_buf_len;
	uint32_t sha_block_size;

	total = qcedev_areq->sha_op_req.data_len + t_buf;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	if (total <= sha_block_size) {
		uint32_t len = qcedev_areq->sha_op_req.data_len;

		i = 0;

		k_src = &handle->sha_ctxt.trailing_buf[t_buf];

		/* Copy data from user src(s) */
		while (len > 0) {
			user_src =
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
			if (user_src && __copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len))
				return -EFAULT;

			len -= qcedev_areq->sha_op_req.data[i].len;
			k_src += qcedev_areq->sha_op_req.data[i].len;
			i++;
		}
		handle->sha_ctxt.trailing_buf_len = total;

		return 0;
	}


	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL) {
		pr_err("%s: Can't Allocate memory: k_buf_src 0x%x\n",
				__func__, (uint32_t)k_buf_src);
		return -ENOMEM;
	}

	k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
							CACHE_LINE_SIZE);
	k_src = k_align_src;

	/* check for trailing buffer from previous updates and append it */
	if (t_buf > 0) {
		memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
								t_buf);
		k_src += t_buf;
	}

	/* Copy data from user src(s) */
	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
	if (user_src && __copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[0].len)) {
		kfree(k_buf_src);
		return -EFAULT;
	}
	k_src += qcedev_areq->sha_op_req.data[0].len;
	for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src = (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && __copy_from_user(k_src,
					(void __user *)user_src,
					qcedev_areq->sha_op_req.data[i].len)) {
			kfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	/* get new trailing buffer */
	sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
	trailing_buf_len = CE_SHA_BLOCK_SIZE - sha_pad_len;

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src[0];
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src,
						total-trailing_buf_len);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;

	/* update sha_ctxt trailing buf content to new trailing buf */
	if (trailing_buf_len > 0) {
		memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
		memcpy(&handle->sha_ctxt.trailing_buf[0],
			(k_src - trailing_buf_len),
			trailing_buf_len);
	}
	handle->sha_ctxt.trailing_buf_len = trailing_buf_len;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;

	kfree(k_buf_src);
	return err;
}

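/*
 * Top-level hash update. Requests larger than QCE_MAX_OPER_DATA (32 KB)
 * are split into a series of max-xfer updates; the user's qcedev_sha_op_req
 * is temporarily rewritten for each chunk and restored afterwards.
 */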
static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	int num_entries = 0;
	uint32_t total = 0;

	/* verify address src(s) */
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++)
		if (!access_ok(VERIFY_READ,
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr,
			qcedev_areq->sha_op_req.data[i].len))
			return -EFAULT;

	if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {

		struct qcedev_sha_op_req *saved_req;
		struct qcedev_sha_op_req req;
		struct qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;

		/* save the original req structure */
		saved_req =
			kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL);
		if (saved_req == NULL) {
			pr_err("%s:Can't Allocate mem:saved_req 0x%x\n",
						__func__, (uint32_t)saved_req);
			return -ENOMEM;
		}
		memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req));
		memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
				sreq->data[0].len = QCE_MAX_OPER_DATA;
				if (i > 0) {
					sreq->data[0].vaddr =
							sreq->data[i].vaddr;
				}

				sreq->data_len = QCE_MAX_OPER_DATA;
				sreq->entries = 1;

				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle);

				sreq->data[i].len = req.data[i].len -
							QCE_MAX_OPER_DATA;
				sreq->data[i].vaddr = req.data[i].vaddr +
							QCE_MAX_OPER_DATA;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;
			} else {
				total = 0;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + sreq->data[j].len) >=
							QCE_MAX_OPER_DATA) {
						sreq->data[j].len =
						(QCE_MAX_OPER_DATA - total);
						total = QCE_MAX_OPER_DATA;
						break;
					}
					total += sreq->data[j].len;
				}

				sreq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						sreq->data[k].len =
							sreq->data[i+k].len;
						sreq->data[k].vaddr =
							sreq->data[i+k].vaddr;
					}
				sreq->entries = num_entries;

				i = j;
				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle);
				num_entries = 0;

				sreq->data[i].vaddr = req.data[i].vaddr +
							sreq->data[i].len;
				sreq->data[i].len = req.data[i].len -
							sreq->data[i].len;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;

				if (sreq->data[i].len == 0)
					i++;
			}
		} /* end of while ((i < req.entries) && (err == 0)) */

		/* Restore the original req structure */
		for (i = 0; i < saved_req->entries; i++) {
			sreq->data[i].len = saved_req->data[i].len;
			sreq->data[i].vaddr = saved_req->data[i].vaddr;
		}
		sreq->entries = saved_req->entries;
		sreq->data_len = saved_req->data_len;
		kfree(saved_req);
	} else
		err = qcedev_sha_update_max_xfer(qcedev_areq, handle);

	return err;
}

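/*
 * Final hash step: push whatever is left in trailing_buf through the CE
 * with last_blk set, then reset the per-handle hash context.
 */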
static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint32_t total;

	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	handle->sha_ctxt.first_blk = 0;
	handle->sha_ctxt.last_blk = 1;

	total = handle->sha_ctxt.trailing_buf_len;

	if (total) {
		k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
					GFP_KERNEL);
		if (k_buf_src == NULL) {
			pr_err("%s: Can't Allocate memory: k_buf_src 0x%x\n",
					__func__, (uint32_t)k_buf_src);
			return -ENOMEM;
		}

		k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
							CACHE_LINE_SIZE);
		memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);
	}
	handle->sha_ctxt.last_blk = 1;
	handle->sha_ctxt.first_blk = 0;

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, total);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.first_blk = 0;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;
	handle->sha_ctxt.trailing_buf_len = 0;
	memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);

	kfree(k_buf_src);
	return err;
}

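/*
 * AES-CMAC: copy the user authentication key and all source fragments into
 * a single kernel buffer and run the whole message through the CE in one
 * request (CMAC is not computed incrementally here).
 */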
static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle)
{
	int err = 0;
	int i = 0;
	struct scatterlist sg_src[2];
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;

	total = qcedev_areq->sha_op_req.data_len;

	/* verify address src(s) */
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++)
		if (!access_ok(VERIFY_READ,
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr,
			qcedev_areq->sha_op_req.data[i].len))
			return -EFAULT;

	/* Verify Source Address */
	if (!access_ok(VERIFY_READ,
				(void __user *)qcedev_areq->sha_op_req.authkey,
				qcedev_areq->sha_op_req.authklen))
		return -EFAULT;
	if (__copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)qcedev_areq->sha_op_req.authkey,
				qcedev_areq->sha_op_req.authklen))
		return -EFAULT;


	k_buf_src = kmalloc(total, GFP_KERNEL);
	if (k_buf_src == NULL) {
		pr_err("%s: Can't Allocate memory: k_buf_src 0x%x\n",
				__func__, (uint32_t)k_buf_src);
		return -ENOMEM;
	}

	k_src = k_buf_src;

	/* Copy data from user src(s) */
	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src =
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && __copy_from_user(k_src, (void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len)) {
			kfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src[0];
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_buf_src, total);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total;
	handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
	err = submit_req(qcedev_areq, handle);

	kfree(k_buf_src);
	return err;
}

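/*
 * Store the HMAC key for this handle. Keys no longer than the maximum key
 * size are copied directly; longer keys are first reduced to their
 * SHA-1/SHA-256 digest (as in standard HMAC key handling) and the digest
 * is used as the key.
 */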
static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
					struct qcedev_handle *handle)
{
	int err = 0;

	if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
		/* Verify Source Address */
		if (!access_ok(VERIFY_READ,
				(void __user *)areq->sha_op_req.authkey,
				areq->sha_op_req.authklen))
			return -EFAULT;
		if (__copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)areq->sha_op_req.authkey,
				areq->sha_op_req.authklen))
			return -EFAULT;
	} else {
		struct qcedev_async_req authkey_areq;

		init_completion(&authkey_areq.complete);

		authkey_areq.sha_op_req.entries = 1;
		authkey_areq.sha_op_req.data[0].vaddr =
						areq->sha_op_req.authkey;
		authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.diglen = 0;
		memset(&authkey_areq.sha_op_req.digest[0], 0,
						QCEDEV_MAX_SHA_DIGEST);
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;

		authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;

		qcedev_sha_init(&authkey_areq, handle);
		err = qcedev_sha_update(&authkey_areq, handle);
		if (!err)
			err = qcedev_sha_final(&authkey_areq, handle);
		else
			return err;
		memcpy(&handle->sha_ctxt.authkey[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
	}
	return err;
}

static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint8_t *k_src = NULL;
	uint32_t sha_block_size = 0;
	uint32_t sha_digest_size = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		sha_digest_size = SHA1_DIGEST_SIZE;
		sha_block_size = SHA1_BLOCK_SIZE;
	} else {
		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
			sha_digest_size = SHA256_DIGEST_SIZE;
			sha_block_size = SHA256_BLOCK_SIZE;
		}
	}
	k_src = kmalloc(sha_block_size, GFP_KERNEL);
	if (k_src == NULL) {
		pr_err("%s: Can't Allocate memory: k_src 0x%x\n",
				__func__, (uint32_t)k_src);
		return -ENOMEM;
	}

	/* check for trailing buffer from previous updates and append it */
	memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
			handle->sha_ctxt.trailing_buf_len);

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = sha_block_size;
	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0],
						sha_digest_size);
	handle->sha_ctxt.trailing_buf_len = sha_digest_size;

	handle->sha_ctxt.first_blk = 1;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE;
	}

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE;
	}
	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;

	kfree(k_src);
	return err;
}

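/*
 * Software HMAC fallback for hardware without native SHA-HMAC support:
 * XOR the stored key with the HMAC ipad (0x36) or opad (0x5c) constant
 * and stage the result in trailing_buf as the first block of the inner
 * or outer hash.
 */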
static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
				struct qcedev_handle *handle, bool ikey)
{
	int i;
	uint32_t constant;
	uint32_t sha_block_size;

	if (ikey)
		constant = 0x36;
	else
		constant = 0x5c;

	if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	for (i = 0; i < sha_block_size; i++)
		handle->sha_ctxt.trailing_buf[i] =
				(handle->sha_ctxt.authkey[i] ^ constant);

	handle->sha_ctxt.trailing_buf_len = sha_block_size;
	return 0;
}

static int qcedev_hmac_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	qcedev_sha_init(areq, handle);
	err = qcedev_set_hmac_auth_key(areq, handle);
	if (err)
		return err;
	if (!podev->ce_support.sha_hmac)
		qcedev_hmac_update_iokey(areq, handle, true);
	return 0;
}

static int qcedev_hmac_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	err = qcedev_sha_final(areq, handle);
	if (podev->ce_support.sha_hmac)
		return err;

	qcedev_hmac_update_iokey(areq, handle, false);
	err = qcedev_hmac_get_ohash(areq, handle);
	if (err)
		return err;
	err = qcedev_sha_final(areq, handle);

	return err;
}

static int qcedev_hash_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_init(areq, handle);
	else
		return qcedev_hmac_init(areq, handle);
}

static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	return qcedev_sha_update(qcedev_areq, handle);
}

static int qcedev_hash_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_final(areq, handle);
	else
		return qcedev_hmac_final(areq, handle);
}

#ifdef CONFIG_ANDROID_PMEM
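/*
 * PMEM path: source and destination live in pmem regions identified by
 * file descriptors. Scatterlists are built directly on the kernel mapping
 * returned by get_pmem_file(), so no data is copied, and the user offsets
 * are converted to physical addresses for the CE.
 */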
static int qcedev_pmem_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
						struct qcedev_handle *handle)
{
	int i = 0;
	int err = 0;
	struct scatterlist *sg_src = NULL;
	struct scatterlist *sg_dst = NULL;
	struct scatterlist *sg_ndex = NULL;
	struct file *file_src = NULL;
	struct file *file_dst = NULL;
	unsigned long paddr;
	unsigned long kvaddr;
	unsigned long len;

	sg_src = kmalloc((sizeof(struct scatterlist) *
				areq->cipher_op_req.entries), GFP_KERNEL);
	if (sg_src == NULL) {
		pr_err("%s: Can't Allocate memory:sg_src 0x%x\n",
			__func__, (uint32_t)sg_src);
		return -ENOMEM;

	}
	memset(sg_src, 0, (sizeof(struct scatterlist) *
				areq->cipher_op_req.entries));
	sg_ndex = sg_src;
	areq->cipher_req.creq.src = sg_src;

	/* address src */
	get_pmem_file(areq->cipher_op_req.pmem.fd_src, &paddr,
					&kvaddr, &len, &file_src);

	for (i = 0; i < areq->cipher_op_req.entries; i++) {
		sg_set_buf(sg_ndex,
		((uint8_t *)(areq->cipher_op_req.pmem.src[i].offset) + kvaddr),
		areq->cipher_op_req.pmem.src[i].len);
		sg_ndex++;
	}
	sg_mark_end(--sg_ndex);

	for (i = 0; i < areq->cipher_op_req.entries; i++)
		areq->cipher_op_req.pmem.src[i].offset += (uint32_t)paddr;

	/* address dst */
	/* If not in-place encryption/decryption */
	if (areq->cipher_op_req.in_place_op != 1) {
		sg_dst = kmalloc((sizeof(struct scatterlist) *
				areq->cipher_op_req.entries), GFP_KERNEL);
		if (sg_dst == NULL) {
			pr_err("%s: Can't Allocate memory: sg_dst 0x%x\n",
				__func__, (uint32_t)sg_dst);
			return -ENOMEM;
		}
		memset(sg_dst, 0, (sizeof(struct scatterlist) *
					areq->cipher_op_req.entries));
		areq->cipher_req.creq.dst = sg_dst;
		sg_ndex = sg_dst;

		get_pmem_file(areq->cipher_op_req.pmem.fd_dst, &paddr,
					&kvaddr, &len, &file_dst);
		for (i = 0; i < areq->cipher_op_req.entries; i++)
			sg_set_buf(sg_ndex++,
			((uint8_t *)(areq->cipher_op_req.pmem.dst[i].offset)
			+ kvaddr), areq->cipher_op_req.pmem.dst[i].len);
		sg_mark_end(--sg_ndex);

		for (i = 0; i < areq->cipher_op_req.entries; i++)
			areq->cipher_op_req.pmem.dst[i].offset +=
							(uint32_t)paddr;
	} else {
		areq->cipher_req.creq.dst = sg_src;
		for (i = 0; i < areq->cipher_op_req.entries; i++) {
			areq->cipher_op_req.pmem.dst[i].offset =
				areq->cipher_op_req.pmem.src[i].offset;
			areq->cipher_op_req.pmem.dst[i].len =
				areq->cipher_op_req.pmem.src[i].len;
		}
	}

	areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
	areq->cipher_req.creq.info = areq->cipher_op_req.iv;

	err = submit_req(areq, handle);

	kfree(sg_src);
	kfree(sg_dst);

	if (file_dst)
		put_pmem_file(file_dst);
	if (file_src)
		put_pmem_file(file_src);

	return err;
};


static int qcedev_pmem_ablk_cipher(struct qcedev_async_req *qcedev_areq,
						struct qcedev_handle *handle)
{
	int err = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	int num_entries = 0;
	uint32_t total = 0;
	struct qcedev_cipher_op_req *saved_req;
	struct qcedev_cipher_op_req *creq = &qcedev_areq->cipher_op_req;

	saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
	if (saved_req == NULL) {
		pr_err("%s:Can't Allocate mem:saved_req 0x%x\n",
			__func__, (uint32_t)saved_req);
		return -ENOMEM;
	}
	memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));

	if (qcedev_areq->cipher_op_req.data_len > QCE_MAX_OPER_DATA) {

		struct qcedev_cipher_op_req req;

		/* save the original req structure */
		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (creq->pmem.src[i].len > QCE_MAX_OPER_DATA) {
				creq->pmem.src[0].len = QCE_MAX_OPER_DATA;
				if (i > 0) {
					creq->pmem.src[0].offset =
						creq->pmem.src[i].offset;
				}

				creq->data_len = QCE_MAX_OPER_DATA;
				creq->entries = 1;

				err =
				qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq,
								handle);

				creq->pmem.src[i].len = req.pmem.src[i].len -
							QCE_MAX_OPER_DATA;
				creq->pmem.src[i].offset =
						req.pmem.src[i].offset +
							QCE_MAX_OPER_DATA;
				req.pmem.src[i].offset =
						creq->pmem.src[i].offset;
				req.pmem.src[i].len = creq->pmem.src[i].len;
			} else {
				total = 0;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + creq->pmem.src[j].len)
							>= QCE_MAX_OPER_DATA) {
						creq->pmem.src[j].len =
						QCE_MAX_OPER_DATA - total;
						total = QCE_MAX_OPER_DATA;
						break;
					}
					total += creq->pmem.src[j].len;
				}

				creq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						creq->pmem.src[k].len =
						creq->pmem.src[i+k].len;
						creq->pmem.src[k].offset =
						creq->pmem.src[i+k].offset;
					}
				creq->entries = num_entries;

				i = j;
				err =
				qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq,
								handle);
				num_entries = 0;

				creq->pmem.src[i].offset =
						req.pmem.src[i].offset +
						creq->pmem.src[i].len;
				creq->pmem.src[i].len =
						req.pmem.src[i].len -
						creq->pmem.src[i].len;
				req.pmem.src[i].offset =
						creq->pmem.src[i].offset;
				req.pmem.src[i].len =
						creq->pmem.src[i].len;

				if (creq->pmem.src[i].len == 0)
					i++;
			}

		} /* end of while ((i < req.entries) && (err == 0)) */

	} else
		err = qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq, handle);

	/* Restore the original req structure */
	for (i = 0; i < saved_req->entries; i++) {
		creq->pmem.src[i].len = saved_req->pmem.src[i].len;
		creq->pmem.src[i].offset = saved_req->pmem.src[i].offset;
	}
	creq->entries = saved_req->entries;
	creq->data_len = saved_req->data_len;
	kfree(saved_req);

	return err;

}
#else
static int qcedev_pmem_ablk_cipher(struct qcedev_async_req *qcedev_areq,
						struct qcedev_handle *handle)
{
	return -EPERM;
}
#endif /*CONFIG_ANDROID_PMEM*/

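/*
 * Virtual-buffer path: copy up to QCE_MAX_OPER_DATA bytes of user data into
 * a cache-line aligned kernel bounce buffer, run the cipher in place on that
 * buffer, then copy the result back out to the user destination fragments.
 * *di tracks which destination entry the copy-out has reached across calls.
 */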
static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
				int *di, struct qcedev_handle *handle,
				uint8_t *k_align_src)
{
	int err = 0;
	int i = 0;
	int dst_i = *di;
	struct scatterlist sg_src;
	uint32_t byteoffset = 0;
	uint8_t *user_src = NULL;
	uint8_t *k_align_dst = k_align_src;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;

	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;

	user_src = (void __user *)areq->cipher_op_req.vbuf.src[0].vaddr;
	if (user_src && __copy_from_user((k_align_src + byteoffset),
				(void __user *)user_src,
				areq->cipher_op_req.vbuf.src[0].len))
		return -EFAULT;

	k_align_src += areq->cipher_op_req.vbuf.src[0].len;

	for (i = 1; i < areq->cipher_op_req.entries; i++) {
		user_src =
			(void __user *)areq->cipher_op_req.vbuf.src[i].vaddr;
		if (user_src && __copy_from_user(k_align_src,
					(void __user *)user_src,
					areq->cipher_op_req.vbuf.src[i].len)) {
			return -EFAULT;
		}
		k_align_src += areq->cipher_op_req.vbuf.src[i].len;
	}

	/* restore src beginning */
	k_align_src = k_align_dst;
	areq->cipher_op_req.data_len += byteoffset;

	areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
	areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;

	/* In place encryption/decryption */
	sg_set_buf(areq->cipher_req.creq.src,
			k_align_dst,
			areq->cipher_op_req.data_len);
	sg_mark_end(areq->cipher_req.creq.src);

	areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
	areq->cipher_req.creq.info = areq->cipher_op_req.iv;
	areq->cipher_op_req.entries = 1;

	err = submit_req(areq, handle);

	/* copy data to destination buffer */
	creq->data_len -= byteoffset;

	while (creq->data_len > 0) {
		if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
			if (err == 0 && __copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
					(k_align_dst + byteoffset),
					creq->vbuf.dst[dst_i].len))
				return -EFAULT;

			k_align_dst += creq->vbuf.dst[dst_i].len +
						byteoffset;
			creq->data_len -= creq->vbuf.dst[dst_i].len;
			dst_i++;
		} else {
			if (err == 0 && __copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
					(k_align_dst + byteoffset),
					creq->data_len))
				return -EFAULT;

			k_align_dst += creq->data_len;
			creq->vbuf.dst[dst_i].len -= creq->data_len;
			creq->vbuf.dst[dst_i].vaddr += creq->data_len;
			creq->data_len = 0;
		}
	}
	*di = dst_i;

	return err;
}

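/*
 * Top-level cipher path for user virtual buffers: validate the user
 * addresses, stage the data through a cache-aligned kernel buffer, and
 * split requests larger than QCE_MAX_OPER_DATA into engine-sized
 * chunks, restoring the caller's request structure before returning.
 */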
static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
						struct qcedev_handle *handle)
{
	int err = 0;
	int di = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	uint32_t byteoffset = 0;
	int num_entries = 0;
	uint32_t total = 0;
	uint32_t len;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;
	uint32_t max_data_xfer;
	struct qcedev_cipher_op_req *saved_req;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;

	/* Verify source addresses */
	for (i = 0; i < areq->cipher_op_req.entries; i++)
		if (!access_ok(VERIFY_READ,
			(void __user *)areq->cipher_op_req.vbuf.src[i].vaddr,
					areq->cipher_op_req.vbuf.src[i].len))
			return -EFAULT;

	/* Verify destination addresses (must be writable) */
	if (areq->cipher_op_req.in_place_op != 1)
		for (i = 0; i < areq->cipher_op_req.entries; i++)
			if (!access_ok(VERIFY_WRITE,
			(void __user *)areq->cipher_op_req.vbuf.dst[i].vaddr,
					areq->cipher_op_req.vbuf.dst[i].len))
				return -EFAULT;

	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;
	k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL) {
		pr_err("%s: Can't allocate memory: k_buf_src 0x%x\n",
			__func__, (uint32_t)k_buf_src);
		return -ENOMEM;
	}
	k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
							CACHE_LINE_SIZE);
	max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;

	saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
	if (saved_req == NULL) {
		pr_err("%s: Can't allocate memory: saved_req 0x%x\n",
			__func__, (uint32_t)saved_req);
		kfree(k_buf_src);
		return -ENOMEM;
	}
	memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));

	if (areq->cipher_op_req.data_len > max_data_xfer) {
		struct qcedev_cipher_op_req req;

		/* save the original req structure */
		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (creq->vbuf.src[i].len > max_data_xfer) {
				creq->vbuf.src[0].len = max_data_xfer;
				if (i > 0) {
					creq->vbuf.src[0].vaddr =
						creq->vbuf.src[i].vaddr;
				}

				creq->data_len = max_data_xfer;
				creq->entries = 1;

				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, handle, k_align_src);
				if (err < 0) {
					kfree(k_buf_src);
					kfree(saved_req);
					return err;
				}

				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							max_data_xfer;
				creq->vbuf.src[i].vaddr =
						req.vbuf.src[i].vaddr +
						max_data_xfer;
				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;

			} else {
				total = areq->cipher_op_req.byteoffset;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + creq->vbuf.src[j].len)
							>= max_data_xfer) {
						creq->vbuf.src[j].len =
							max_data_xfer - total;
						total = max_data_xfer;
						break;
					}
					total += creq->vbuf.src[j].len;
				}

				creq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						creq->vbuf.src[k].len =
							creq->vbuf.src[i+k].len;
						creq->vbuf.src[k].vaddr =
							creq->vbuf.src[i+k].vaddr;
					}
				creq->entries = num_entries;

				i = j;
				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, handle, k_align_src);
				if (err < 0) {
					kfree(k_buf_src);
					kfree(saved_req);
					return err;
				}

				num_entries = 0;
				areq->cipher_op_req.byteoffset = 0;

				creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
					+ creq->vbuf.src[i].len;
				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							creq->vbuf.src[i].len;

				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;

				if (creq->vbuf.src[i].len == 0)
					i++;
			}

			areq->cipher_op_req.byteoffset = 0;
			max_data_xfer = QCE_MAX_OPER_DATA;
			byteoffset = 0;

		} /* end of while ((i < req.entries) && (err == 0)) */
	} else
		err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle,
							k_align_src);

	/* Restore the original req structure */
	for (i = 0; i < saved_req->entries; i++) {
		creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
		creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
	}
	for (len = 0, i = 0; len < saved_req->data_len; i++) {
		creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
		creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
		len += saved_req->vbuf.dst[i].len;
	}
	creq->entries = saved_req->entries;
	creq->data_len = saved_req->data_len;
	creq->byteoffset = saved_req->byteoffset;

	kfree(saved_req);
	kfree(k_buf_src);
	return err;

}

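/*
 * Sanity-check a cipher request from user space against the
 * capabilities reported for this crypto engine: algorithm, mode,
 * key length, HW-key usage, byteoffset and IV length constraints.
 */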
static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
						struct qcedev_control *podev)
{
	if ((req->entries == 0) || (req->data_len == 0))
		goto error;
	if ((req->alg >= QCEDEV_ALG_LAST) ||
		(req->mode >= QCEDEV_AES_DES_MODE_LAST))
		goto error;
	if (req->alg == QCEDEV_ALG_AES) {
		if ((req->mode == QCEDEV_AES_MODE_XTS) &&
					(!podev->ce_support.aes_xts))
			goto error;
		/* if intending to use HW key make sure key fields are set
		 * correctly and HW key is indeed supported in target
		 */
		if (req->encklen == 0) {
			int i;
			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++)
				if (req->enckey[i])
					goto error;
			if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
				(req->op != QCEDEV_OPER_DEC_NO_KEY))
				if (!podev->platform_support.hw_key_support)
					goto error;
		} else {
			if (req->encklen == QCEDEV_AES_KEY_192) {
				if (!podev->ce_support.aes_key_192)
					goto error;
			} else {
				/* if not using HW key make sure key
				 * length is valid
				 */
				if (!((req->encklen == QCEDEV_AES_KEY_128) ||
					(req->encklen == QCEDEV_AES_KEY_256)))
					goto error;
			}
		}
	}
	/* if using a byteoffset, make sure it is CTR mode using vbuf */
	if (req->byteoffset) {
		if (req->mode != QCEDEV_AES_MODE_CTR)
			goto error;
		else { /* if using CTR mode make sure not using Pmem */
			if (req->use_pmem)
				goto error;
		}
	}
	/* if using PMEM, the operation must be in place */
	if (req->use_pmem) {
		if (!req->in_place_op)
			goto error;
	}
	/* Ensure zero ivlen for ECB mode */
	if (req->ivlen != 0) {
		if ((req->mode == QCEDEV_AES_MODE_ECB) ||
			(req->mode == QCEDEV_DES_MODE_ECB))
			goto error;
	} else {
		if ((req->mode != QCEDEV_AES_MODE_ECB) &&
			(req->mode != QCEDEV_DES_MODE_ECB))
			goto error;
	}

	return 0;
error:
	return -EINVAL;

}

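/*
 * Sanity-check a hash/CMAC request from user space: the algorithm must
 * be supported by the engine and the request must carry data.
 */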
static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
						struct qcedev_control *podev)
{
	if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
				(!podev->ce_support.cmac))
		goto sha_error;

	if ((req->entries == 0) || (req->data_len == 0))
		goto sha_error;

	if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST)
		goto sha_error;

	return 0;
sha_error:
	return -EINVAL;
}

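/*
 * ioctl entry point: copy the operation request in from user space,
 * validate it, dispatch to the cipher or hash path, and copy the
 * updated request (including any digest) back to the caller.
 */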
static long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	int err = 0;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req qcedev_areq;
	struct qcedev_stat *pstat;

	handle = file->private_data;
	podev = handle->cntl;
	qcedev_areq.handle = handle;
	if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
		pr_err("%s: invalid handle %p\n",
					__func__, podev);
		return -ENOENT;
	}

	/* Verify user arguments. */
	if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC)
		return -ENOTTY;

	init_completion(&qcedev_areq.complete);
	pstat = &_qcedev_stat[podev->pdev->id];

	switch (cmd) {
	case QCEDEV_IOCTL_LOCK_CE:
		if (podev->platform_support.ce_shared)
			err = qcedev_lock_ce(podev);
		else
			err = -ENOTTY;
		break;
	case QCEDEV_IOCTL_UNLOCK_CE:
		if (podev->platform_support.ce_shared)
			err = qcedev_unlock_ce(podev);
		else
			err = -ENOTTY;
		break;
	case QCEDEV_IOCTL_ENC_REQ:
	case QCEDEV_IOCTL_DEC_REQ:
		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
				sizeof(struct qcedev_cipher_op_req)))
			return -EFAULT;

		if (__copy_from_user(&qcedev_areq.cipher_op_req,
				(void __user *)arg,
				sizeof(struct qcedev_cipher_op_req)))
			return -EFAULT;
		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_CIPHER;

		if (qcedev_check_cipher_params(&qcedev_areq.cipher_op_req,
				podev))
			return -EINVAL;

		if (qcedev_areq.cipher_op_req.use_pmem)
			err = qcedev_pmem_ablk_cipher(&qcedev_areq, handle);
		else
			err = qcedev_vbuf_ablk_cipher(&qcedev_areq, handle);
		if (err)
			return err;
		if (__copy_to_user((void __user *)arg,
					&qcedev_areq.cipher_op_req,
					sizeof(struct qcedev_cipher_op_req)))
			return -EFAULT;
		break;

	case QCEDEV_IOCTL_SHA_INIT_REQ:

		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;

		if (__copy_from_user(&qcedev_areq.sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
			return -EINVAL;
		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
		err = qcedev_hash_init(&qcedev_areq, handle);
		if (err)
			return err;
		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
					sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		break;
	case QCEDEV_IOCTL_GET_CMAC_REQ:
		if (!podev->ce_support.cmac)
			return -ENOTTY;
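		/* fall through: CMAC requests share the update path below */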
	case QCEDEV_IOCTL_SHA_UPDATE_REQ:
		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;

		if (__copy_from_user(&qcedev_areq.sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
			return -EINVAL;
		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;

		if (qcedev_areq.sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
			err = qcedev_hash_cmac(&qcedev_areq, handle);
			if (err)
				return err;
		} else {
			err = qcedev_hash_update(&qcedev_areq, handle);
			if (err)
				return err;
		}

		memcpy(&qcedev_areq.sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
					sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		break;

	case QCEDEV_IOCTL_SHA_FINAL_REQ:

		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;

		if (__copy_from_user(&qcedev_areq.sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
			return -EINVAL;
		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
		err = qcedev_hash_final(&qcedev_areq, handle);
		if (err)
			return err;
		qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
		memcpy(&qcedev_areq.sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
					sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		break;

	case QCEDEV_IOCTL_GET_SHA_REQ:

		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;

		if (__copy_from_user(&qcedev_areq.sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
			return -EINVAL;
		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
		qcedev_hash_init(&qcedev_areq, handle);
		err = qcedev_hash_update(&qcedev_areq, handle);
		if (err)
			return err;
		err = qcedev_hash_final(&qcedev_areq, handle);
		if (err)
			return err;
		qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
		memcpy(&qcedev_areq.sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
					sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		break;

	default:
		return -ENOTTY;
	}

	return err;
}

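/*
 * Platform driver probe: bind the device to its qcedev_control slot,
 * pick up the platform support data, open the qce back end, register
 * the bus-scaling client when a table is provided, and expose the
 * misc device node.
 */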
static int qcedev_probe(struct platform_device *pdev)
{
	void *handle = NULL;
	int rc = 0;
	struct qcedev_control *podev;
	struct msm_ce_hw_support *platform_support;

	if (pdev->id >= MAX_QCE_DEVICE) {
		pr_err("%s: device id %d exceeds allowed %d\n",
				__func__, pdev->id, MAX_QCE_DEVICE);
		return -ENOENT;
	}
	podev = &qce_dev[pdev->id];

	platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data;
	podev->platform_support.ce_shared = platform_support->ce_shared;
	podev->platform_support.shared_ce_resource =
				platform_support->shared_ce_resource;
	podev->platform_support.hw_key_support =
				platform_support->hw_key_support;
	podev->platform_support.bus_scale_table =
				platform_support->bus_scale_table;
	podev->ce_lock_count = 0;
	podev->high_bw_req_count = 0;
	INIT_LIST_HEAD(&podev->ready_commands);
	podev->active_command = NULL;

	spin_lock_init(&podev->lock);

	tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);

	/* open qce */
	handle = qce_open(pdev, &rc);
	if (handle == NULL) {
		platform_set_drvdata(pdev, NULL);
		return rc;
	}

	podev->qce = handle;
	podev->pdev = pdev;
	platform_set_drvdata(pdev, podev);
	qce_hw_support(podev->qce, &podev->ce_support);

	if (podev->platform_support.bus_scale_table != NULL) {
		podev->bus_scale_handle =
			msm_bus_scale_register_client(
				(struct msm_bus_scale_pdata *)
				podev->platform_support.bus_scale_table);
		if (!podev->bus_scale_handle) {
			pr_err("%s not able to get bus scale\n",
				__func__);
			rc = -ENOMEM;
			goto err;
		}
	}
	rc = misc_register(&podev->miscdevice);

	if (rc >= 0)
		return 0;
	else
		if (podev->platform_support.bus_scale_table != NULL)
			msm_bus_scale_unregister_client(
					podev->bus_scale_handle);
err:

	if (handle)
		qce_close(handle);
	platform_set_drvdata(pdev, NULL);
	podev->qce = NULL;
	podev->pdev = NULL;
	return rc;
}

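/* Undo probe: close the qce back end and tear down the misc device. */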
static int qcedev_remove(struct platform_device *pdev)
{
	struct qcedev_control *podev;

	podev = platform_get_drvdata(pdev);
	if (!podev)
		return 0;
	if (podev->qce)
		qce_close(podev->qce);

	if (podev->platform_support.bus_scale_table != NULL)
		msm_bus_scale_unregister_client(podev->bus_scale_handle);

	if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
		misc_deregister(&podev->miscdevice);
	tasklet_kill(&podev->done_tasklet);
	return 0;
}

static struct platform_driver qcedev_plat_driver = {
	.probe = qcedev_probe,
	.remove = qcedev_remove,
	.driver = {
		.name = "qce",
		.owner = THIS_MODULE,
	},
};

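/* Format the per-device statistics into the shared debugfs read buffer. */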
static int _disp_stats(int id)
{
	struct qcedev_stat *pstat;
	int len = 0;

	pstat = &_qcedev_stat[id];
	len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
			"\nQualcomm QCE dev driver %d Statistics:\n",
				id + 1);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Encryption operation success : %d\n",
					pstat->qcedev_enc_success);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Encryption operation fail    : %d\n",
					pstat->qcedev_enc_fail);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Decryption operation success : %d\n",
					pstat->qcedev_dec_success);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Decryption operation fail    : %d\n",
					pstat->qcedev_dec_fail);

	return len;
}

static int _debug_stats_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static ssize_t _debug_stats_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	int rc = -EINVAL;
	int qcedev = *((int *) file->private_data);
	int len;

	len = _disp_stats(qcedev);

	rc = simple_read_from_buffer((void __user *) buf, len,
			ppos, (void *) _debug_read_buf, len);

	return rc;
}

static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{

	int qcedev = *((int *) file->private_data);

	memset((char *)&_qcedev_stat[qcedev], 0, sizeof(struct qcedev_stat));
	return count;
}

static const struct file_operations _debug_stats_ops = {
	.open = _debug_stats_open,
	.read = _debug_stats_read,
	.write = _debug_stats_write,
};

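/*
 * Create the debugfs directory and one stats-<n> file per possible
 * device; a write to a stats file clears that device's counters.
 */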
static int _qcedev_debug_init(void)
{
	int rc;
	char name[DEBUG_MAX_FNAME];
	int i;
	struct dentry *dent;

	_debug_dent = debugfs_create_dir("qcedev", NULL);
	if (IS_ERR(_debug_dent)) {
		pr_err("qcedev debugfs_create_dir fail, error %ld\n",
				PTR_ERR(_debug_dent));
		return PTR_ERR(_debug_dent);
	}

	for (i = 0; i < MAX_QCE_DEVICE; i++) {
		snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", i+1);
		_debug_qcedev[i] = i;
		dent = debugfs_create_file(name, 0644, _debug_dent,
				&_debug_qcedev[i], &_debug_stats_ops);
		if (dent == NULL) {
			pr_err("qcedev debugfs_create_file fail\n");
			rc = -ENOMEM;
			goto err;
		}
	}
	return 0;
err:
	debugfs_remove_recursive(_debug_dent);
	return rc;
}

static int qcedev_init(void)
{
	int rc;

	rc = _qcedev_debug_init();
	if (rc)
		return rc;
	return platform_driver_register(&qcedev_plat_driver);
}

static void qcedev_exit(void)
{
	debugfs_remove_recursive(_debug_dent);
	platform_driver_unregister(&qcedev_plat_driver);
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm DEV Crypto driver");
MODULE_VERSION("1.26");

module_init(qcedev_init);
module_exit(qcedev_exit);