/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
/*
 * SPI driver for Qualcomm MSM platforms
 *
 */
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <mach/msm_spi.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <mach/dma.h>
#include <asm/atomic.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
#include <linux/remote_spinlock.h>
#include <linux/pm_qos.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include "spi_qsd.h"

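/*
 * The GSBI block that hosts the QUP core can be muxed between several
 * protocols; program it for SPI before any QUP registers are touched.
 * The GSBI register space is the (optional) second memory resource.
 */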
static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
					struct platform_device *pdev)
{
	struct resource *resource;
	unsigned long gsbi_mem_phys_addr;
	size_t gsbi_mem_size;
	void __iomem *gsbi_base;

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!resource)
		return 0;

	gsbi_mem_phys_addr = resource->start;
	gsbi_mem_size = resource_size(resource);
	if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size, SPI_DRV_NAME))
		return -ENXIO;

	gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size);
	if (!gsbi_base)
		return -ENXIO;

	/* Set GSBI to SPI mode */
	writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);

	return 0;
}

static inline void msm_spi_register_init(struct msm_spi *dd)
{
	writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
	writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
	writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
	if (dd->qup_ver)
		writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
}

static inline int msm_spi_request_gpios(struct msm_spi *dd)
{
	int i;
	int result = 0;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0) {
			result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
			if (result) {
				dev_err(dd->dev, "%s: gpio_request for pin %d "
					"failed with error %d\n", __func__,
					dd->spi_gpios[i], result);
				goto error;
			}
		}
	}
	return 0;

error:
	for (; --i >= 0;) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}
	return result;
}

static inline void msm_spi_free_gpios(struct msm_spi *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}

	for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
		if (dd->cs_gpios[i].valid) {
			gpio_free(dd->cs_gpios[i].gpio_num);
			dd->cs_gpios[i].valid = 0;
		}
	}
}

static void msm_spi_clock_set(struct msm_spi *dd, int speed)
{
	int rc;

	rc = clk_set_rate(dd->clk, speed);
	if (!rc)
		dd->clock_speed = speed;
}

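/*
 * Decode the hardware-encoded FIFO geometry: the block field selects the
 * block size in words (1, 4 or 8 words of 4 bytes), and the multiplier
 * field scales that into the FIFO depth (x2, x4, x8 or x16).
 */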
static int msm_spi_calculate_size(int *fifo_size,
				int *block_size,
				int block,
				int mult)
{
	int words;

	switch (block) {
	case 0:
		words = 1; /* 4 bytes */
		break;
	case 1:
		words = 4; /* 16 bytes */
		break;
	case 2:
		words = 8; /* 32 bytes */
		break;
	default:
		return -EINVAL;
	}

	switch (mult) {
	case 0:
		*fifo_size = words * 2;
		break;
	case 1:
		*fifo_size = words * 4;
		break;
	case 2:
		*fifo_size = words * 8;
		break;
	case 3:
		*fifo_size = words * 16;
		break;
	default:
		return -EINVAL;
	}

	*block_size = words * sizeof(u32); /* in bytes */
	return 0;
}

static void get_next_transfer(struct msm_spi *dd)
{
	struct spi_transfer *t = dd->cur_transfer;

	if (t->transfer_list.next != &dd->cur_msg->transfers) {
		dd->cur_transfer = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
		dd->write_buf = dd->cur_transfer->tx_buf;
		dd->read_buf = dd->cur_transfer->rx_buf;
	}
}

static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
{
	u32 spi_iom;
	int block;
	int mult;

	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);

	block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
					block, mult)) {
		goto fifo_size_err;
	}

	block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->output_fifo_size,
					&dd->output_block_size, block, mult)) {
		goto fifo_size_err;
	}
	/* DM mode is not available for this block size */
	if (dd->input_block_size == 4 || dd->output_block_size == 4)
		dd->use_dma = 0;

	/* DM mode is currently unsupported for different block sizes */
	if (dd->input_block_size != dd->output_block_size)
		dd->use_dma = 0;

	if (dd->use_dma)
		dd->burst_size = max(dd->input_block_size, DM_BURST_SIZE);

	return;

fifo_size_err:
	dd->use_dma = 0;
	pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
	return;
}

static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
{
	u32 data_in;
	int i;
	int shift;

	data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
	if (dd->read_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			     dd->rx_bytes_remaining; i++) {
			/* The data format depends on bytes_per_word:
			   4 bytes: 0x12345678
			   3 bytes: 0x00123456
			   2 bytes: 0x00001234
			   1 byte : 0x00000012
			*/
			shift = 8 * (dd->bytes_per_word - i - 1);
			*dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
			dd->rx_bytes_remaining--;
		}
	} else {
		if (dd->rx_bytes_remaining >= dd->bytes_per_word)
			dd->rx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->rx_bytes_remaining = 0;
	}

	dd->read_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->rx_bytes_remaining)
			dd->read_xfr_cnt = 0;
		else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
						dd->read_len) {
			struct spi_transfer *t = dd->cur_rx_transfer;
			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->read_buf = t->rx_buf;
				dd->read_len = t->len;
				dd->read_xfr_cnt = 0;
				dd->cur_rx_transfer = t;
			}
		}
	}
}

static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
{
	u32 spi_op = readl_relaxed(dd->base + SPI_STATE);

	return spi_op & SPI_OP_STATE_VALID;
}

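/*
 * Poll until the QUP reports a valid operational state. The polling
 * interval is scaled from the SPI clock speed and the overall timeout is
 * expressed in jiffies, so slow buses get proportionally more time.
 */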
static inline int msm_spi_wait_valid(struct msm_spi *dd)
{
	unsigned long delay = 0;
	unsigned long timeout = 0;

	if (dd->clock_speed == 0)
		return -EINVAL;
	/*
	 * Based on the SPI clock speed, sufficient time
	 * should be given for the SPI state transition
	 * to occur
	 */
	delay = (10 * USEC_PER_SEC) / dd->clock_speed;
	/*
	 * For small delay values, the default timeout would
	 * be one jiffy
	 */
	if (delay < SPI_DELAY_THRESHOLD)
		delay = SPI_DELAY_THRESHOLD;

	/* Adding one to round off to the nearest jiffy */
	timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
	while (!msm_spi_is_valid_state(dd)) {
		if (time_after(jiffies, timeout)) {
			if (!msm_spi_is_valid_state(dd)) {
				if (dd->cur_msg)
					dd->cur_msg->status = -EIO;
				dev_err(dd->dev, "%s: SPI operational state "
					"not valid\n", __func__);
				return -ETIMEDOUT;
			} else
				return 0;
		}
		/*
		 * For smaller values of delay, context switch time
		 * would negate the usage of usleep
		 */
		if (delay > 20)
			usleep(delay);
		else if (delay)
			udelay(delay);
	}
	return 0;
}

static inline int msm_spi_set_state(struct msm_spi *dd,
					enum msm_spi_state state)
{
	enum msm_spi_state cur_state;
	if (msm_spi_wait_valid(dd))
		return -EIO;
	cur_state = readl_relaxed(dd->base + SPI_STATE);
	/* Per spec:
	   For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
	if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
			(state == SPI_OP_STATE_RESET)) {
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
	} else {
		writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
				dd->base + SPI_STATE);
	}
	if (msm_spi_wait_valid(dd))
		return -EIO;

	return 0;
}

static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n)
{
	*config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);

	if (n != (*config & SPI_CFG_N))
		*config = (*config & ~SPI_CFG_N) | n;

	if ((dd->mode == SPI_DMOV_MODE) && (!dd->read_len)) {
		if (dd->read_buf == NULL)
			*config |= SPI_NO_INPUT;
		if (dd->write_buf == NULL)
			*config |= SPI_NO_OUTPUT;
	}
}

static void msm_spi_set_config(struct msm_spi *dd, int bpw)
{
	u32 spi_config;

	spi_config = readl_relaxed(dd->base + SPI_CONFIG);

	if (dd->cur_msg->spi->mode & SPI_CPHA)
		spi_config &= ~SPI_CFG_INPUT_FIRST;
	else
		spi_config |= SPI_CFG_INPUT_FIRST;
	if (dd->cur_msg->spi->mode & SPI_LOOP)
		spi_config |= SPI_CFG_LOOPBACK;
	else
		spi_config &= ~SPI_CFG_LOOPBACK;
	msm_spi_add_configs(dd, &spi_config, bpw-1);
	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
	msm_spi_set_qup_config(dd, bpw);
}

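/*
 * Program the Data Mover for the current chunk: whole bursts are moved
 * with the box command (one row per burst), and any unaligned tail is
 * staged through the cache-aligned padding buffers via the single
 * command. The MX counts are set so padding bytes are never clocked out.
 */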
static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
{
	dmov_box *box;
	int bytes_to_send, num_rows, bytes_sent;
	u32 num_transfers;

	atomic_set(&dd->rx_irq_called, 0);
	atomic_set(&dd->tx_irq_called, 0);
	if (dd->write_len && !dd->read_len) {
		/* WR-WR transfer */
		bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
		dd->write_buf = dd->temp_buf;
	} else {
		bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
		/* For WR-RD transfer, bytes_sent can be negative */
		if (bytes_sent < 0)
			bytes_sent = 0;
	}

	/* We'll send in chunks of SPI_MAX_LEN if larger than
	 * 4K bytes for targets that don't support infinite
	 * mode. Make sure this doesn't happen on targets that
	 * support infinite mode.
	 */
	if (!dd->pdata->infinite_mode)
		bytes_to_send = dd->tx_bytes_remaining / SPI_MAX_LEN ?
				SPI_MAX_LEN : dd->tx_bytes_remaining;
	else
		bytes_to_send = dd->tx_bytes_remaining;

	num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
	dd->unaligned_len = bytes_to_send % dd->burst_size;
	num_rows = bytes_to_send / dd->burst_size;

	dd->mode = SPI_DMOV_MODE;

	if (num_rows) {
		/* src in 16 MSB, dst in 16 LSB */
		box = &dd->tx_dmov_cmd->box;
		box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
		box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
		box->num_rows = (num_rows << 16) | num_rows;
		box->row_offset = (dd->burst_size << 16) | 0;

		box = &dd->rx_dmov_cmd->box;
		box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
		box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
		box->num_rows = (num_rows << 16) | num_rows;
		box->row_offset = (0 << 16) | dd->burst_size;

		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, box));
		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, box));
	} else {
		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, single_pad));
		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, single_pad));
	}

	if (!dd->unaligned_len) {
		dd->tx_dmov_cmd->box.cmd |= CMD_LC;
		dd->rx_dmov_cmd->box.cmd |= CMD_LC;
	} else {
		dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
		dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
		u32 offset = dd->cur_transfer->len - dd->unaligned_len;

		if ((dd->multi_xfr) && (dd->read_len <= 0))
			offset = dd->cur_msg_len - dd->unaligned_len;

		dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
		dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;

		memset(dd->tx_padding, 0, dd->burst_size);
		memset(dd->rx_padding, 0, dd->burst_size);
		if (dd->write_buf)
			memcpy(dd->tx_padding, dd->write_buf + offset,
			       dd->unaligned_len);

		tx_cmd->src = dd->tx_padding_dma;
		rx_cmd->dst = dd->rx_padding_dma;
		tx_cmd->len = rx_cmd->len = dd->burst_size;
	}
	/* This also takes care of the padding dummy buf.
	   Since this is set to the correct length, the
	   dummy bytes won't actually be sent. */
	if (dd->multi_xfr) {
		u32 write_transfers = 0;
		u32 read_transfers = 0;

		if (dd->write_len > 0) {
			write_transfers = DIV_ROUND_UP(dd->write_len,
						       dd->bytes_per_word);
			writel_relaxed(write_transfers,
				       dd->base + SPI_MX_OUTPUT_COUNT);
		}
		if (dd->read_len > 0) {
			/*
			 * The read following a write transfer must take
			 * into account, that the bytes pertaining to
			 * the write transfer need to be discarded,
			 * before the actual read begins.
			 */
			read_transfers = DIV_ROUND_UP(dd->read_len +
						      dd->write_len,
						      dd->bytes_per_word);
			writel_relaxed(read_transfers,
				       dd->base + SPI_MX_INPUT_COUNT);
		}
	} else {
		if (dd->write_buf)
			writel_relaxed(num_transfers,
				       dd->base + SPI_MX_OUTPUT_COUNT);
		if (dd->read_buf)
			writel_relaxed(num_transfers,
				       dd->base + SPI_MX_INPUT_COUNT);
	}
}

static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
{
	dma_coherent_pre_ops();
	if (dd->write_buf)
		msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
	if (dd->read_buf)
		msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
}

/* The SPI core on targets that do not support infinite mode can send a
   maximum of 4K per transfer. Therefore, we send several chunks of 3K or less
   (depending on how much is left). Upon completion we send the next chunk,
   or complete the transfer if everything is finished. On targets that support
   infinite mode, we send all the bytes in a single chunk.
*/
static int msm_spi_dm_send_next(struct msm_spi *dd)
{
	/* By now we should have sent all the bytes in FIFO mode,
	 * However to make things right, we'll check anyway.
	 */
	if (dd->mode != SPI_DMOV_MODE)
		return 0;

	/* On targets which do not support infinite mode,
	   we need to send more chunks if we sent the maximum last time */
	if ((!dd->pdata->infinite_mode) &&
	    (dd->tx_bytes_remaining > SPI_MAX_LEN)) {
		dd->tx_bytes_remaining -= SPI_MAX_LEN;
		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
			return 0;
		dd->read_len = dd->write_len = 0;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	} else if (dd->read_len && dd->write_len) {
		dd->tx_bytes_remaining -= dd->cur_transfer->len;
		if (list_is_last(&dd->cur_transfer->transfer_list,
					&dd->cur_msg->transfers))
			return 0;
		get_next_transfer(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
			return 0;
		dd->tx_bytes_remaining = dd->read_len + dd->write_len;
		dd->read_buf = dd->temp_buf;
		dd->read_len = dd->write_len = -1;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	}
	return 0;
}

static inline void msm_spi_ack_transfer(struct msm_spi *dd)
{
	writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
		       SPI_OP_MAX_OUTPUT_DONE_FLAG,
		       dd->base + SPI_OPERATIONAL);
	/* Ensure done flag was cleared before proceeding further */
	mb();
}

/* Figure out which IRQ occurred and call the relevant handlers */
static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
{
	u32 op, ret = IRQ_NONE;
	struct msm_spi *dd = dev_id;

	if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
	    readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
		struct spi_master *master = dev_get_drvdata(dd->dev);
		ret |= msm_spi_error_irq(irq, master);
	}

	op = readl_relaxed(dd->base + SPI_OPERATIONAL);
	if (op & SPI_OP_INPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_input_irq(irq, dev_id);
	}

	if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_output_irq(irq, dev_id);
	}

	if (dd->done) {
		complete(&dd->transfer_complete);
		dd->done = 0;
	}
	return ret;
}

static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_rx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
		if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
		    (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
			msm_spi_ack_transfer(dd);
			if (dd->unaligned_len == 0) {
				if (atomic_inc_return(&dd->rx_irq_called) == 1)
					return IRQ_HANDLED;
			}
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	if (dd->mode == SPI_FIFO_MODE) {
		while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
			SPI_OP_IP_FIFO_NOT_EMPTY) &&
			(dd->rx_bytes_remaining > 0)) {
			msm_spi_read_word_from_fifo(dd);
		}
		if (dd->rx_bytes_remaining == 0)
			msm_spi_complete(dd);
	}

	return IRQ_HANDLED;
}

static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
{
	u32 word;
	u8 byte;
	int i;

	word = 0;
	if (dd->write_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			     dd->tx_bytes_remaining; i++) {
			dd->tx_bytes_remaining--;
			byte = *dd->write_buf++;
			word |= (byte << (BITS_PER_BYTE * (3 - i)));
		}
	} else
		if (dd->tx_bytes_remaining > dd->bytes_per_word)
			dd->tx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->tx_bytes_remaining = 0;
	dd->write_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->tx_bytes_remaining)
			dd->write_xfr_cnt = 0;
		else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
						dd->write_len) {
			struct spi_transfer *t = dd->cur_tx_transfer;
			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->write_buf = t->tx_buf;
				dd->write_len = t->len;
				dd->write_xfr_cnt = 0;
				dd->cur_tx_transfer = t;
			}
		}
	}
	writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
}

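/*
 * Top up the output FIFO: keep writing words until the transfer runs out
 * of data, one FIFO's worth has been queued, or the FIFO reports full.
 */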
static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
{
	int count = 0;

	while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
	       !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
	       SPI_OP_OUTPUT_FIFO_FULL)) {
		msm_spi_write_word_to_fifo(dd);
		count++;
	}
}

static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_tx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		/* A TX_ONLY transaction is handled here. This is the only
		   place we signal completion from the TX path, not RX. */
		if (dd->read_buf == NULL &&
		    readl_relaxed(dd->base + SPI_OPERATIONAL) &
		    SPI_OP_MAX_OUTPUT_DONE_FLAG) {
			msm_spi_ack_transfer(dd);
			if (atomic_inc_return(&dd->tx_irq_called) == 1)
				return IRQ_HANDLED;
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	/* Output FIFO is empty. Transmit any outstanding write data. */
	if (dd->mode == SPI_FIFO_MODE)
		msm_spi_write_rmn_to_fifo(dd);

	return IRQ_HANDLED;
}

static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct msm_spi *dd = spi_master_get_devdata(master);
	u32 spi_err;

	spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
	if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output overrun error\n");
	if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI input underrun error\n");
	if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output underrun error\n");
	msm_spi_get_clk_err(dd, &spi_err);
	if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock overrun error\n");
	if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock underrun error\n");
	msm_spi_clear_error_flags(dd);
	msm_spi_ack_clk_err(dd);
	/* Ensure clearing of QUP_ERROR_FLAGS was completed */
	mb();
	return IRQ_HANDLED;
}

static int msm_spi_map_dma_buffers(struct msm_spi *dd)
{
	struct device *dev;
	struct spi_transfer *first_xfr;
	struct spi_transfer *nxt_xfr = NULL;
	void *tx_buf, *rx_buf;
	unsigned tx_len, rx_len;
	int ret = -EINVAL;

	dev = &dd->cur_msg->spi->dev;
	first_xfr = dd->cur_transfer;
	tx_buf = (void *)first_xfr->tx_buf;
	rx_buf = first_xfr->rx_buf;
	tx_len = rx_len = first_xfr->len;

	/*
	 * For WR-WR and WR-RD transfers, we allocate our own temporary
	 * buffer and copy the data to/from the client buffers.
	 */
	if (dd->multi_xfr) {
		dd->temp_buf = kzalloc(dd->cur_msg_len,
				       GFP_KERNEL | __GFP_DMA);
		if (!dd->temp_buf)
			return -ENOMEM;
		nxt_xfr = list_entry(first_xfr->transfer_list.next,
				     struct spi_transfer, transfer_list);

		if (dd->write_len && !dd->read_len) {
			if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
				goto error;

			memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
			memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
			       nxt_xfr->len);
			tx_buf = dd->temp_buf;
			tx_len = dd->cur_msg_len;
		} else {
			if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
				goto error;

			rx_buf = dd->temp_buf;
			rx_len = dd->cur_msg_len;
		}
	}
	if (tx_buf != NULL) {
		first_xfr->tx_dma = dma_map_single(dev, tx_buf,
						   tx_len, DMA_TO_DEVICE);
		if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'T', tx_len);
			ret = -ENOMEM;
			goto error;
		}
	}
	if (rx_buf != NULL) {
		dma_addr_t dma_handle;
		dma_handle = dma_map_single(dev, rx_buf,
					    rx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(NULL, dma_handle)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'R', rx_len);
			if (tx_buf != NULL)
				dma_unmap_single(NULL, first_xfr->tx_dma,
						 tx_len, DMA_TO_DEVICE);
			ret = -ENOMEM;
			goto error;
		}
		if (dd->multi_xfr)
			nxt_xfr->rx_dma = dma_handle;
		else
			first_xfr->rx_dma = dma_handle;
	}
	return 0;

error:
	kfree(dd->temp_buf);
	dd->temp_buf = NULL;
	return ret;
}

static void msm_spi_unmap_dma_buffers(struct msm_spi *dd)
{
	struct device *dev;
	u32 offset;

	dev = &dd->cur_msg->spi->dev;
	if (dd->cur_msg->is_dma_mapped)
		goto unmap_end;

	if (dd->multi_xfr) {
		if (dd->write_len && !dd->read_len) {
			dma_unmap_single(dev,
					 dd->cur_transfer->tx_dma,
					 dd->cur_msg_len,
					 DMA_TO_DEVICE);
		} else {
			struct spi_transfer *prev_xfr;
			prev_xfr = list_entry(
				   dd->cur_transfer->transfer_list.prev,
				   struct spi_transfer,
				   transfer_list);
			if (dd->cur_transfer->rx_buf) {
				dma_unmap_single(dev,
						 dd->cur_transfer->rx_dma,
						 dd->cur_msg_len,
						 DMA_FROM_DEVICE);
			}
			if (prev_xfr->tx_buf) {
				dma_unmap_single(dev,
						 prev_xfr->tx_dma,
						 prev_xfr->len,
						 DMA_TO_DEVICE);
			}
			if (dd->unaligned_len && dd->read_buf) {
				offset = dd->cur_msg_len - dd->unaligned_len;
				dma_coherent_post_ops();
				memcpy(dd->read_buf + offset, dd->rx_padding,
				       dd->unaligned_len);
				memcpy(dd->cur_transfer->rx_buf,
				       dd->read_buf + prev_xfr->len,
				       dd->cur_transfer->len);
			}
		}
		kfree(dd->temp_buf);
		dd->temp_buf = NULL;
		return;
	} else {
		if (dd->cur_transfer->rx_buf)
			dma_unmap_single(dev, dd->cur_transfer->rx_dma,
					 dd->cur_transfer->len,
					 DMA_FROM_DEVICE);
		if (dd->cur_transfer->tx_buf)
			dma_unmap_single(dev, dd->cur_transfer->tx_dma,
					 dd->cur_transfer->len,
					 DMA_TO_DEVICE);
	}

unmap_end:
	/* If we padded the transfer, we copy it from the padding buf */
	if (dd->unaligned_len && dd->read_buf) {
		offset = dd->cur_transfer->len - dd->unaligned_len;
		dma_coherent_post_ops();
		memcpy(dd->read_buf + offset, dd->rx_padding,
		       dd->unaligned_len);
	}
}

/**
 * msm_use_dm - decides whether to use data mover for this
 * transfer
 * @dd: device
 * @tr: transfer
 *
 * Start using DM if:
 * 1. Transfer is longer than 3*block size.
 * 2. Buffers should be aligned to cache line.
 * 3. For WR-RD or WR-WR transfers, if condition (1) and (2) above are met.
 */
static inline int msm_use_dm(struct msm_spi *dd, struct spi_transfer *tr,
			     u8 bpw)
{
	u32 cache_line = dma_get_cache_alignment();

	if (!dd->use_dma)
		return 0;

	if (dd->cur_msg_len < 3*dd->input_block_size)
		return 0;

	if (dd->multi_xfr && !dd->read_len && !dd->write_len)
		return 0;

	if (tr->tx_buf) {
		if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
			return 0;
	}
	if (tr->rx_buf) {
		if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
			return 0;
	}

	/* DM with cs_change is only supported for 8/16/32 bits per word */
	if (tr->cs_change &&
	   ((bpw != 8) && (bpw != 16) && (bpw != 32)))
		return 0;
	return 1;
}

static void msm_spi_process_transfer(struct msm_spi *dd)
{
	u8  bpw;
	u32 spi_ioc;
	u32 spi_iom;
	u32 spi_ioc_orig;
	u32 max_speed;
	u32 chip_select;
	u32 read_count;
	u32 timeout;
	u32 int_loopback = 0;

	dd->tx_bytes_remaining = dd->cur_msg_len;
	dd->rx_bytes_remaining = dd->cur_msg_len;
	dd->read_buf = dd->cur_transfer->rx_buf;
	dd->write_buf = dd->cur_transfer->tx_buf;
	init_completion(&dd->transfer_complete);
	if (dd->cur_transfer->bits_per_word)
		bpw = dd->cur_transfer->bits_per_word;
	else
		if (dd->cur_msg->spi->bits_per_word)
			bpw = dd->cur_msg->spi->bits_per_word;
		else
			bpw = 8;
	dd->bytes_per_word = (bpw + 7) / 8;

	if (dd->cur_transfer->speed_hz)
		max_speed = dd->cur_transfer->speed_hz;
	else
		max_speed = dd->cur_msg->spi->max_speed_hz;
	if (!dd->clock_speed || max_speed != dd->clock_speed)
		msm_spi_clock_set(dd, max_speed);

	read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
	if (dd->cur_msg->spi->mode & SPI_LOOP)
		int_loopback = 1;
	if (int_loopback && dd->multi_xfr &&
			(read_count > dd->input_fifo_size)) {
		if (dd->read_len && dd->write_len)
			pr_err(
			"%s:Internal Loopback does not support > fifo size "
			"for write-then-read transactions\n",
			__func__);
		else if (dd->write_len && !dd->read_len)
			pr_err(
			"%s:Internal Loopback does not support > fifo size "
			"for write-then-write transactions\n",
			__func__);
		return;
	}
	if (!msm_use_dm(dd, dd->cur_transfer, bpw)) {
		dd->mode = SPI_FIFO_MODE;
		if (dd->multi_xfr) {
			dd->read_len = dd->cur_transfer->len;
			dd->write_len = dd->cur_transfer->len;
		}
		/* read_count cannot exceed fifo_size, and only one READ COUNT
		   interrupt is generated per transaction, so for transactions
		   larger than fifo size READ COUNT must be disabled.
		   For those transactions we usually move to Data Mover mode.
		*/
		if (read_count <= dd->input_fifo_size) {
			writel_relaxed(read_count,
				       dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, read_count);
		} else {
			writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, 0);
		}
	} else {
		dd->mode = SPI_DMOV_MODE;
		if (dd->write_len && dd->read_len) {
			dd->tx_bytes_remaining = dd->write_len;
			dd->rx_bytes_remaining = dd->read_len;
		}
	}

	/* Write mode - fifo or data mover */
	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
	spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
	spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
	spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
	/* Turn on packing for data mover */
	if (dd->mode == SPI_DMOV_MODE)
		spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
	else
		spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
	writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);

	msm_spi_set_config(dd, bpw);

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (dd->cur_msg->spi->mode & SPI_CPOL)
		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
	chip_select = dd->cur_msg->spi->chip_select << 2;
	if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
		spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
	if (!dd->cur_transfer->cs_change)
		spi_ioc |= SPI_IO_C_MX_CS_MODE;
	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);

	if (dd->mode == SPI_DMOV_MODE) {
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
	}
	/* The output fifo interrupt handler will handle all writes after
	   the first. Restricting this to one write avoids contention
	   issues and race conditions between this thread and the int handler
	*/
	else if (dd->mode == SPI_FIFO_MODE) {
		if (msm_spi_prepare_for_write(dd))
			goto transfer_end;
		msm_spi_start_write(dd, read_count);
	}

	/* Only enter the RUN state after the first word is written into
	   the output FIFO. Otherwise, the output FIFO EMPTY interrupt
	   might fire before the first word is written resulting in a
	   possible race condition.
	*/
	if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
		goto transfer_end;

	timeout = 100 * msecs_to_jiffies(
			DIV_ROUND_UP(dd->cur_msg_len * 8,
			DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));

	/* Assume success, this might change later upon transaction result */
	dd->cur_msg->status = 0;
	do {
		if (!wait_for_completion_timeout(&dd->transfer_complete,
						 timeout)) {
			dev_err(dd->dev, "%s: SPI transaction "
					 "timeout\n", __func__);
			dd->cur_msg->status = -EIO;
			if (dd->mode == SPI_DMOV_MODE) {
				msm_dmov_flush(dd->tx_dma_chan, 1);
				msm_dmov_flush(dd->rx_dma_chan, 1);
			}
			break;
		}
	} while (msm_spi_dm_send_next(dd));

transfer_end:
	if (dd->mode == SPI_DMOV_MODE)
		msm_spi_unmap_dma_buffers(dd);
	dd->mode = SPI_MODE_NONE;

	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
		       dd->base + SPI_IO_CONTROL);
}

static void get_transfer_length(struct msm_spi *dd)
{
	struct spi_transfer *tr;
	int num_xfrs = 0;
	int readlen = 0;
	int writelen = 0;

	dd->cur_msg_len = 0;
	dd->multi_xfr = 0;
	dd->read_len = dd->write_len = 0;

	list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
		if (tr->tx_buf)
			writelen += tr->len;
		if (tr->rx_buf)
			readlen += tr->len;
		dd->cur_msg_len += tr->len;
		num_xfrs++;
	}

	if (num_xfrs == 2) {
		struct spi_transfer *first_xfr = dd->cur_transfer;

		dd->multi_xfr = 1;
		tr = list_entry(first_xfr->transfer_list.next,
				struct spi_transfer,
				transfer_list);
		/*
		 * We update dd->read_len and dd->write_len only
		 * for WR-WR and WR-RD transfers.
		 */
		if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
			if (((tr->tx_buf) && (!tr->rx_buf)) ||
			    ((!tr->tx_buf) && (tr->rx_buf))) {
				dd->read_len = readlen;
				dd->write_len = writelen;
			}
		}
	} else if (num_xfrs > 1)
		dd->multi_xfr = 1;
}

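/*
 * Group consecutive transfers that share the same cs_change setting so
 * they can run back-to-back as one logical transaction; returns the number
 * of transfers grouped and leaves cur_msg_len set to their combined length.
 */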
static inline int combine_transfers(struct msm_spi *dd)
{
	struct spi_transfer *t = dd->cur_transfer;
	struct spi_transfer *nxt;
	int xfrs_grped = 1;

	dd->cur_msg_len = dd->cur_transfer->len;
	while (t->transfer_list.next != &dd->cur_msg->transfers) {
		nxt = list_entry(t->transfer_list.next,
				 struct spi_transfer,
				 transfer_list);
		if (t->cs_change != nxt->cs_change)
			return xfrs_grped;
		dd->cur_msg_len += nxt->len;
		xfrs_grped++;
		t = nxt;
	}
	return xfrs_grped;
}

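/*
 * Set or clear the FORCE_CS bit, which keeps the chip select asserted
 * across adjacent transfers; the register is only written when the value
 * actually changes.
 */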
static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
{
	u32 spi_ioc;
	u32 spi_ioc_orig;

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (set_flag)
		spi_ioc |= SPI_IO_C_FORCE_CS;
	else
		spi_ioc &= ~SPI_IO_C_FORCE_CS;

	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
}

static void msm_spi_process_message(struct msm_spi *dd)
{
	int xfrs_grped = 0;
	int cs_num;
	int rc;

	dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
	cs_num = dd->cur_msg->spi->chip_select;
	if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
		(!(dd->cs_gpios[cs_num].valid)) &&
		(dd->cs_gpios[cs_num].gpio_num >= 0)) {
		rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
				spi_cs_rsrcs[cs_num]);
		if (rc) {
			dev_err(dd->dev, "gpio_request for pin %d failed with "
				"error %d\n", dd->cs_gpios[cs_num].gpio_num,
				rc);
			return;
		}
		dd->cs_gpios[cs_num].valid = 1;
	}

	if (dd->qup_ver) {
		write_force_cs(dd, 0);
		list_for_each_entry(dd->cur_transfer,
				    &dd->cur_msg->transfers,
				    transfer_list) {
			struct spi_transfer *t = dd->cur_transfer;
			struct spi_transfer *nxt;

			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				nxt = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);

				if (t->cs_change == nxt->cs_change)
					write_force_cs(dd, 1);
				else
					write_force_cs(dd, 0);
			}

			dd->cur_msg_len = dd->cur_transfer->len;
			msm_spi_process_transfer(dd);
		}
	} else {
		dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
						    struct spi_transfer,
						    transfer_list);
		get_transfer_length(dd);
		if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
			/*
			 * Handling of multi-transfers.
			 * FIFO mode is used by default
			 */
			list_for_each_entry(dd->cur_transfer,
					    &dd->cur_msg->transfers,
					    transfer_list) {
				if (!dd->cur_transfer->len)
					goto error;
				if (xfrs_grped) {
					xfrs_grped--;
					continue;
				} else {
					dd->read_len = dd->write_len = 0;
					xfrs_grped = combine_transfers(dd);
				}

				dd->cur_tx_transfer = dd->cur_transfer;
				dd->cur_rx_transfer = dd->cur_transfer;
				msm_spi_process_transfer(dd);
				xfrs_grped--;
			}
		} else {
			/* Handling of a single transfer or
			 * WR-WR or WR-RD transfers
			 */
			if ((!dd->cur_msg->is_dma_mapped) &&
			    (msm_use_dm(dd, dd->cur_transfer,
					dd->cur_transfer->bits_per_word))) {
				/* Mapping of DMA buffers */
				int ret = msm_spi_map_dma_buffers(dd);
				if (ret < 0) {
					dd->cur_msg->status = ret;
					goto error;
				}
			}

			dd->cur_tx_transfer = dd->cur_transfer;
			dd->cur_rx_transfer = dd->cur_transfer;
			msm_spi_process_transfer(dd);
		}
	}

	return;

error:
	if (dd->cs_gpios[cs_num].valid) {
		gpio_free(dd->cs_gpios[cs_num].gpio_num);
		dd->cs_gpios[cs_num].valid = 0;
	}
}

/* workqueue - pull messages from queue & process */
static void msm_spi_workq(struct work_struct *work)
{
	struct msm_spi *dd =
		container_of(work, struct msm_spi, work_data);
	unsigned long flags;
	u32 status_error = 0;

	mutex_lock(&dd->core_lock);

	/* Don't allow power collapse until we release mutex */
	if (pm_qos_request_active(&qos_req_list))
		pm_qos_update_request(&qos_req_list,
				      dd->pm_lat);
	if (dd->use_rlock)
		remote_mutex_lock(&dd->r_lock);

	clk_prepare_enable(dd->clk);
	clk_prepare_enable(dd->pclk);
	msm_spi_enable_irqs(dd);

	if (!msm_spi_is_valid_state(dd)) {
		dev_err(dd->dev, "%s: SPI operational state not valid\n",
			__func__);
		status_error = 1;
	}

	spin_lock_irqsave(&dd->queue_lock, flags);
	while (!list_empty(&dd->queue)) {
		dd->cur_msg = list_entry(dd->queue.next,
					 struct spi_message, queue);
		list_del_init(&dd->cur_msg->queue);
		spin_unlock_irqrestore(&dd->queue_lock, flags);
		if (status_error)
			dd->cur_msg->status = -EIO;
		else
			msm_spi_process_message(dd);
		if (dd->cur_msg->complete)
			dd->cur_msg->complete(dd->cur_msg->context);
		spin_lock_irqsave(&dd->queue_lock, flags);
	}
	dd->transfer_pending = 0;
	spin_unlock_irqrestore(&dd->queue_lock, flags);

	msm_spi_disable_irqs(dd);
	clk_disable_unprepare(dd->clk);
	clk_disable_unprepare(dd->pclk);

	if (dd->use_rlock)
		remote_mutex_unlock(&dd->r_lock);

	if (pm_qos_request_active(&qos_req_list))
		pm_qos_update_request(&qos_req_list,
				      PM_QOS_DEFAULT_VALUE);

	mutex_unlock(&dd->core_lock);
	/* If needed, this can be done after the current message is complete,
	   and work can be continued upon resume. No motivation for now. */
	if (dd->suspended)
		wake_up_interruptible(&dd->continue_suspend);
}

static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct msm_spi *dd;
	unsigned long flags;
	struct spi_transfer *tr;

	dd = spi_master_get_devdata(spi->master);
	if (dd->suspended)
		return -EBUSY;

	if (list_empty(&msg->transfers) || !msg->complete)
		return -EINVAL;

	list_for_each_entry(tr, &msg->transfers, transfer_list) {
		/* Check message parameters */
		if (tr->speed_hz > dd->pdata->max_clock_speed ||
		    (tr->bits_per_word &&
		     (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
		    (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
			dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw, "
				"tx=%p, rx=%p\n",
				tr->speed_hz, tr->bits_per_word,
				tr->tx_buf, tr->rx_buf);
			return -EINVAL;
		}
	}

	spin_lock_irqsave(&dd->queue_lock, flags);
	if (dd->suspended) {
		spin_unlock_irqrestore(&dd->queue_lock, flags);
		return -EBUSY;
	}
	dd->transfer_pending = 1;
	list_add_tail(&msg->queue, &dd->queue);
	spin_unlock_irqrestore(&dd->queue_lock, flags);
	queue_work(dd->workqueue, &dd->work_data);
	return 0;
}

static int msm_spi_setup(struct spi_device *spi)
{
	struct msm_spi *dd;
	int rc = 0;
	u32 spi_ioc;
	u32 spi_config;
	u32 mask;

	if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
		dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
			__func__, spi->bits_per_word);
		rc = -EINVAL;
	}
	if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
		dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
			__func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
		rc = -EINVAL;
	}

	if (rc)
		goto err_setup_exit;

	dd = spi_master_get_devdata(spi->master);

	mutex_lock(&dd->core_lock);
	if (dd->suspended) {
		mutex_unlock(&dd->core_lock);
		return -EBUSY;
	}

	if (dd->use_rlock)
		remote_mutex_lock(&dd->r_lock);

	clk_prepare_enable(dd->clk);
	clk_prepare_enable(dd->pclk);

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
	if (spi->mode & SPI_CS_HIGH)
		spi_ioc |= mask;
	else
		spi_ioc &= ~mask;
	if (spi->mode & SPI_CPOL)
		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;

	writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);

	spi_config = readl_relaxed(dd->base + SPI_CONFIG);
	if (spi->mode & SPI_LOOP)
		spi_config |= SPI_CFG_LOOPBACK;
	else
		spi_config &= ~SPI_CFG_LOOPBACK;
	if (spi->mode & SPI_CPHA)
		spi_config &= ~SPI_CFG_INPUT_FIRST;
	else
		spi_config |= SPI_CFG_INPUT_FIRST;
	writel_relaxed(spi_config, dd->base + SPI_CONFIG);

	/* Ensure previous write completed before disabling the clocks */
	mb();
	clk_disable_unprepare(dd->clk);
	clk_disable_unprepare(dd->pclk);

	if (dd->use_rlock)
		remote_mutex_unlock(&dd->r_lock);
	mutex_unlock(&dd->core_lock);

err_setup_exit:
	return rc;
}

#ifdef CONFIG_DEBUG_FS
static int debugfs_iomem_x32_set(void *data, u64 val)
{
	writel_relaxed(val, data);
	/* Ensure the previous write completed. */
	mb();
	return 0;
}

static int debugfs_iomem_x32_get(void *data, u64 *val)
{
	*val = readl_relaxed(data);
	/* Ensure the previous read completed. */
	mb();
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
			debugfs_iomem_x32_set, "0x%08llx\n");

static void spi_debugfs_init(struct msm_spi *dd)
{
	dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
	if (dd->dent_spi) {
		int i;

		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
			dd->debugfs_spi_regs[i] =
				debugfs_create_file(
					debugfs_spi_regs[i].name,
					debugfs_spi_regs[i].mode,
					dd->dent_spi,
					dd->base + debugfs_spi_regs[i].offset,
					&fops_iomem_x32);
		}
	}
}

static void spi_debugfs_exit(struct msm_spi *dd)
{
	if (dd->dent_spi) {
		int i;

		debugfs_remove_recursive(dd->dent_spi);
		dd->dent_spi = NULL;
		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
			dd->debugfs_spi_regs[i] = NULL;
	}
}
#else
static void spi_debugfs_init(struct msm_spi *dd) {}
static void spi_debugfs_exit(struct msm_spi *dd) {}
#endif

/* ===Device attributes begin=== */
static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct msm_spi *dd = spi_master_get_devdata(master);

	return snprintf(buf, PAGE_SIZE,
			"Device %s\n"
			"rx fifo_size = %d spi words\n"
			"tx fifo_size = %d spi words\n"
			"use_dma ? %s\n"
			"rx block size = %d bytes\n"
			"tx block size = %d bytes\n"
			"burst size = %d bytes\n"
			"DMA configuration:\n"
			"tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
			"--statistics--\n"
			"Rx isrs = %d\n"
			"Tx isrs = %d\n"
			"DMA error = %d\n"
			"--debug--\n"
			"NA yet\n",
			dev_name(dev),
			dd->input_fifo_size,
			dd->output_fifo_size,
			dd->use_dma ? "yes" : "no",
			dd->input_block_size,
			dd->output_block_size,
			dd->burst_size,
			dd->tx_dma_chan,
			dd->rx_dma_chan,
			dd->tx_dma_crci,
			dd->rx_dma_crci,
			dd->stat_rx + dd->stat_dmov_rx,
			dd->stat_tx + dd->stat_dmov_tx,
			dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
			);
}

/* Reset statistics on write */
static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct msm_spi *dd = dev_get_drvdata(dev);
	dd->stat_rx = 0;
	dd->stat_tx = 0;
	dd->stat_dmov_rx = 0;
	dd->stat_dmov_tx = 0;
	dd->stat_dmov_rx_err = 0;
	dd->stat_dmov_tx_err = 0;
	return count;
}

static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);

static struct attribute *dev_attrs[] = {
	&dev_attr_stats.attr,
	NULL,
};

static struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
/* ===Device attributes end=== */

/**
 * spi_dmov_tx_complete_func - DataMover tx completion callback
 *
 * Executed in IRQ context (Data Mover's IRQ) DataMover's
 * spinlock @msm_dmov_lock held.
 */
static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
				      unsigned int result,
				      struct msm_dmov_errdata *err)
{
	struct msm_spi *dd;

	if (!(result & DMOV_RSLT_VALID)) {
		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
		return;
	}
	/* restore original context */
	dd = container_of(cmd, struct msm_spi, tx_hdr);
	if (result & DMOV_RSLT_DONE) {
		dd->stat_dmov_tx++;
		if ((atomic_inc_return(&dd->tx_irq_called) == 1))
			return;
		complete(&dd->transfer_complete);
	} else {
		/* Error or flush */
		if (result & DMOV_RSLT_ERROR) {
			dev_err(dd->dev, "DMA error (0x%08x)\n", result);
			dd->stat_dmov_tx_err++;
		}
		if (result & DMOV_RSLT_FLUSH) {
			/*
			 * Flushing normally happens in process of
			 * removing, when we are waiting for outstanding
			 * DMA commands to be flushed.
			 */
			dev_info(dd->dev,
				 "DMA channel flushed (0x%08x)\n", result);
		}
		if (err)
			dev_err(dd->dev,
				"Flush data(%08x %08x %08x %08x %08x %08x)\n",
				err->flush[0], err->flush[1], err->flush[2],
				err->flush[3], err->flush[4], err->flush[5]);
		dd->cur_msg->status = -EIO;
		complete(&dd->transfer_complete);
	}
}

/**
 * spi_dmov_rx_complete_func - DataMover rx completion callback
 *
 * Executed in IRQ context (Data Mover's IRQ)
 * DataMover's spinlock @msm_dmov_lock held.
 */
static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
				      unsigned int result,
				      struct msm_dmov_errdata *err)
{
	struct msm_spi *dd;

	if (!(result & DMOV_RSLT_VALID)) {
		pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)",
		       result, cmd);
		return;
	}
	/* restore original context */
	dd = container_of(cmd, struct msm_spi, rx_hdr);
	if (result & DMOV_RSLT_DONE) {
		dd->stat_dmov_rx++;
		if (atomic_inc_return(&dd->rx_irq_called) == 1)
			return;
		complete(&dd->transfer_complete);
	} else {
		/* Error or flush */
		if (result & DMOV_RSLT_ERROR) {
			dev_err(dd->dev, "DMA error(0x%08x)\n", result);
			dd->stat_dmov_rx_err++;
		}
		if (result & DMOV_RSLT_FLUSH) {
			dev_info(dd->dev,
				 "DMA channel flushed(0x%08x)\n", result);
		}
		if (err)
			dev_err(dd->dev,
				"Flush data(%08x %08x %08x %08x %08x %08x)\n",
				err->flush[0], err->flush[1], err->flush[2],
				err->flush[3], err->flush[4], err->flush[5]);
		dd->cur_msg->status = -EIO;
		complete(&dd->transfer_complete);
	}
}

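/*
 * Size of the single coherent allocation that holds both DMOV command
 * structures and both cache-line-aligned padding buffers.
 */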
static inline u32 get_chunk_size(struct msm_spi *dd)
{
	u32 cache_line = dma_get_cache_alignment();

	return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
		roundup(dd->burst_size, cache_line))*2;
}

static void msm_spi_teardown_dma(struct msm_spi *dd)
{
	int limit = 0;

	if (!dd->use_dma)
		return;

	while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
		msm_dmov_flush(dd->tx_dma_chan, 1);
		msm_dmov_flush(dd->rx_dma_chan, 1);
		msleep(10);
	}

	dma_free_coherent(NULL, get_chunk_size(dd), dd->tx_dmov_cmd,
			  dd->tx_dmov_cmd_dma);
	dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
	dd->tx_padding = dd->rx_padding = NULL;
}

static __init int msm_spi_init_dma(struct msm_spi *dd)
{
	dmov_box *box;
	u32 cache_line = dma_get_cache_alignment();

	/* Allocate all as one chunk, since all is smaller than page size */

	/* We pass a NULL device, since using the real device would require a
	   coherent_dma_mask in its definition; we're okay with using the
	   system pool */
	dd->tx_dmov_cmd = dma_alloc_coherent(NULL, get_chunk_size(dd),
					     &dd->tx_dmov_cmd_dma, GFP_KERNEL);
	if (dd->tx_dmov_cmd == NULL)
		return -ENOMEM;

	/* DMA addresses should be 64-bit aligned */
	dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
			  ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
	dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
			      sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);

	/* Buffers should be aligned to cache line */
	dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
	dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
			      sizeof(struct spi_dmov_cmd), cache_line);
	dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding + dd->burst_size),
				     cache_line);
	dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->burst_size,
				   cache_line);

	/* Setup DM commands */
	box = &(dd->rx_dmov_cmd->box);
	box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
	box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
	dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
			    DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
			    offsetof(struct spi_dmov_cmd, cmd_ptr));
	dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;

	box = &(dd->tx_dmov_cmd->box);
	box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
	box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
	dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
			    DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
			    offsetof(struct spi_dmov_cmd, cmd_ptr));
	dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;

	dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
					  CMD_DST_CRCI(dd->tx_dma_crci);
	dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
					  SPI_OUTPUT_FIFO;
	dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
					  CMD_SRC_CRCI(dd->rx_dma_crci);
	dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
					  SPI_INPUT_FIFO;

	/* Clear remaining activities on channel */
	msm_dmov_flush(dd->tx_dma_chan, 1);
	msm_dmov_flush(dd->rx_dma_chan, 1);

	return 0;
}

struct msm_spi_platform_data *msm_spi_dt_to_pdata(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct msm_spi_platform_data *pdata;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		pr_err("Unable to allocate platform data\n");
		return NULL;
	}

	of_property_read_u32(node, "spi-max-frequency",
			     &pdata->max_clock_speed);
	of_property_read_u32(node, "infinite_mode",
			     &pdata->infinite_mode);

	return pdata;
}

static int __init msm_spi_probe(struct platform_device *pdev)
{
        struct spi_master *master;
        struct msm_spi *dd;
        struct resource *resource;
        int rc = -ENXIO;
        int locked = 0;
        int i = 0;
        int clk_enabled = 0;
        int pclk_enabled = 0;
        struct msm_spi_platform_data *pdata;
        enum of_gpio_flags flags;

        master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
        if (!master) {
                rc = -ENOMEM;
                dev_err(&pdev->dev, "master allocation failed\n");
                goto err_probe_exit;
        }

        master->bus_num = pdev->id;
        master->mode_bits = SPI_SUPPORTED_MODES;
        master->num_chipselect = SPI_NUM_CHIPSELECTS;
        master->setup = msm_spi_setup;
        master->transfer = msm_spi_transfer;
        platform_set_drvdata(pdev, master);
        dd = spi_master_get_devdata(master);

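        /*
         * Two configuration paths: device-tree nodes describe QUP v2
         * (B-family) controllers and supply pdata plus SPI/chip-select GPIOs
         * via DT properties, while legacy boards pass platform_data and list
         * the GPIOs as IORESOURCE_IO resources.
         */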
        if (pdev->dev.of_node) {
                dd->qup_ver = SPI_QUP_VERSION_BFAM;
                master->dev.of_node = pdev->dev.of_node;
                pdata = msm_spi_dt_to_pdata(pdev);
                if (!pdata) {
                        rc = -ENOMEM;
                        goto err_probe_exit;
                }

                for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
                        dd->spi_gpios[i] = of_get_gpio_flags(pdev->dev.of_node,
                                                             i, &flags);
                }

                for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
                        dd->cs_gpios[i].gpio_num = of_get_named_gpio_flags(
                                                pdev->dev.of_node, "cs-gpios",
                                                i, &flags);
                        dd->cs_gpios[i].valid = 0;
                }
        } else {
                pdata = pdev->dev.platform_data;
                dd->qup_ver = SPI_QUP_VERSION_NONE;

                for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
                        resource = platform_get_resource(pdev, IORESOURCE_IO,
                                                         i);
                        dd->spi_gpios[i] = resource ? resource->start : -1;
                }

                for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
                        resource = platform_get_resource(pdev, IORESOURCE_IO,
                                                i + ARRAY_SIZE(spi_rsrcs));
                        dd->cs_gpios[i].gpio_num = resource ?
                                                        resource->start : -1;
                        dd->cs_gpios[i].valid = 0;
                }
        }

        dd->pdata = pdata;
        resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!resource) {
                rc = -ENXIO;
                goto err_probe_res;
        }

        dd->mem_phys_addr = resource->start;
        dd->mem_size = resource_size(resource);

        if (pdata) {
                if (pdata->dma_config) {
                        rc = pdata->dma_config();
                        if (rc) {
                                dev_warn(&pdev->dev,
                                        "%s: DM mode not supported\n",
                                        __func__);
                                dd->use_dma = 0;
                                goto skip_dma_resources;
                        }
                }
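                /*
                 * The first IORESOURCE_DMA entry carries the RX/TX data-mover
                 * channel numbers in its start/end fields; the second carries
                 * the matching RX/TX CRCIs. DMA mode is enabled only when
                 * both entries are present.
                 */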
                resource = platform_get_resource(pdev, IORESOURCE_DMA, 0);
                if (resource) {
                        dd->rx_dma_chan = resource->start;
                        dd->tx_dma_chan = resource->end;
                        resource = platform_get_resource(pdev, IORESOURCE_DMA,
                                                        1);
                        if (!resource) {
                                rc = -ENXIO;
                                goto err_probe_res;
                        }

                        dd->rx_dma_crci = resource->start;
                        dd->tx_dma_crci = resource->end;
                        dd->use_dma = 1;
                        master->dma_alignment = dma_get_cache_alignment();
                }

skip_dma_resources:
                if (pdata->gpio_config) {
                        rc = pdata->gpio_config();
                        if (rc) {
                                dev_err(&pdev->dev,
                                        "%s: error configuring GPIOs\n",
                                        __func__);
                                goto err_probe_gpio;
                        }
                }
        }

        rc = msm_spi_request_gpios(dd);
        if (rc)
                goto err_probe_gpio;

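        /*
         * Transfers are queued on dd->queue under queue_lock and drained by
         * msm_spi_workq() on a dedicated single-threaded workqueue;
         * continue_suspend lets the suspend path wait for a transfer that is
         * still in flight.
         */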
        spin_lock_init(&dd->queue_lock);
        mutex_init(&dd->core_lock);
        INIT_LIST_HEAD(&dd->queue);
        INIT_WORK(&dd->work_data, msm_spi_workq);
        init_waitqueue_head(&dd->continue_suspend);
        dd->workqueue = create_singlethread_workqueue(
                        dev_name(master->dev.parent));
        if (!dd->workqueue)
                goto err_probe_workq;

        if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
                                        dd->mem_size, SPI_DRV_NAME)) {
                rc = -ENXIO;
                goto err_probe_reqmem;
        }

        dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
        if (!dd->base) {
                rc = -ENOMEM;
                goto err_probe_reqmem;
        }

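        /*
         * On targets where the QUP is shared with a remote processor, pdata
         * names a remote spinlock; the remote mutex built on it is taken
         * around controller setup below (and presumably around transfers),
         * and a pm_qos request is registered so CPU/DMA latency can be
         * capped via pdata->pm_lat while the bus is busy.
         */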
        if (pdata && pdata->rsl_id) {
                struct remote_mutex_id rmid;
                rmid.r_spinlock_id = pdata->rsl_id;
                rmid.delay_us = SPI_TRYLOCK_DELAY;

                rc = remote_mutex_init(&dd->r_lock, &rmid);
                if (rc) {
                        dev_err(&pdev->dev, "%s: unable to init remote_mutex "
                                "(%s), (rc=%d)\n", __func__,
                                rmid.r_spinlock_id, rc);
                        goto err_probe_rlock_init;
                }

                dd->use_rlock = 1;
                dd->pm_lat = pdata->pm_lat;
                pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
                                   PM_QOS_DEFAULT_VALUE);
        }

        mutex_lock(&dd->core_lock);
        if (dd->use_rlock)
                remote_mutex_lock(&dd->r_lock);

        locked = 1;
        dd->dev = &pdev->dev;
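        /*
         * Two clocks are required: "core_clk" feeds the QUP serial engine
         * and is what msm_spi_clock_set() scales, while "iface_clk" is the
         * bus/AHB clock needed for register access. Both are kept enabled
         * only long enough to program the controller, then released below.
         */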
        dd->clk = clk_get(&pdev->dev, "core_clk");
        if (IS_ERR(dd->clk)) {
                dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
                rc = PTR_ERR(dd->clk);
                goto err_probe_clk_get;
        }

        dd->pclk = clk_get(&pdev->dev, "iface_clk");
        if (IS_ERR(dd->pclk)) {
                dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
                rc = PTR_ERR(dd->pclk);
                goto err_probe_pclk_get;
        }

        if (pdata && pdata->max_clock_speed)
                msm_spi_clock_set(dd, dd->pdata->max_clock_speed);

        rc = clk_prepare_enable(dd->clk);
        if (rc) {
                dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
                        __func__);
                goto err_probe_clk_enable;
        }

        clk_enabled = 1;
        rc = clk_prepare_enable(dd->pclk);
        if (rc) {
                dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
                        __func__);
                goto err_probe_pclk_enable;
        }

        pclk_enabled = 1;
        rc = msm_spi_configure_gsbi(dd, pdev);
        if (rc)
                goto err_probe_gsbi;

        msm_spi_calculate_fifo_size(dd);
        if (dd->use_dma) {
                rc = msm_spi_init_dma(dd);
                if (rc)
                        goto err_probe_dma;
        }

        msm_spi_register_init(dd);
        /*
         * The SPI core generates a bogus input overrun error on some targets,
         * when a transition from run to reset state occurs and if the FIFO has
         * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
         * bit.
         */
        msm_spi_enable_error_flags(dd);

        writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
        rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
        if (rc)
                goto err_probe_state;

        clk_disable_unprepare(dd->clk);
        clk_disable_unprepare(dd->pclk);
        clk_enabled = 0;
        pclk_enabled = 0;

        dd->suspended = 0;
        dd->transfer_pending = 0;
        dd->multi_xfr = 0;
        dd->mode = SPI_MODE_NONE;

        rc = msm_spi_request_irq(dd, pdev, master);
        if (rc)
                goto err_probe_irq;

        msm_spi_disable_irqs(dd);
        if (dd->use_rlock)
                remote_mutex_unlock(&dd->r_lock);

        mutex_unlock(&dd->core_lock);
        locked = 0;

        rc = spi_register_master(master);
        if (rc)
                goto err_probe_reg_master;

        rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
        if (rc) {
                dev_err(&pdev->dev, "failed to create dev. attrs: %d\n", rc);
                goto err_attrs;
        }

        spi_debugfs_init(dd);
        return 0;

err_attrs:
        spi_unregister_master(master);
err_probe_reg_master:
err_probe_irq:
err_probe_state:
        msm_spi_teardown_dma(dd);
err_probe_dma:
err_probe_gsbi:
        if (pclk_enabled)
                clk_disable_unprepare(dd->pclk);
err_probe_pclk_enable:
        if (clk_enabled)
                clk_disable_unprepare(dd->clk);
err_probe_clk_enable:
        clk_put(dd->pclk);
err_probe_pclk_get:
        clk_put(dd->clk);
err_probe_clk_get:
        if (locked) {
                if (dd->use_rlock)
                        remote_mutex_unlock(&dd->r_lock);

                mutex_unlock(&dd->core_lock);
        }
err_probe_rlock_init:
err_probe_reqmem:
        destroy_workqueue(dd->workqueue);
err_probe_workq:
        msm_spi_free_gpios(dd);
err_probe_gpio:
        if (pdata && pdata->gpio_release)
                pdata->gpio_release();
err_probe_res:
        spi_master_put(master);
err_probe_exit:
        return rc;
}

#ifdef CONFIG_PM
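/*
 * Legacy platform_device suspend/resume (no dev_pm_ops): suspend flags the
 * controller as suspended so nothing new is queued, waits for any pending
 * transfer to drain via continue_suspend, and releases the GPIOs; resume
 * reclaims the GPIOs and clears the flag.
 */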
static int msm_spi_suspend(struct platform_device *pdev, pm_message_t state)
{
        struct spi_master *master = platform_get_drvdata(pdev);
        struct msm_spi *dd;
        unsigned long flags;

        if (!master)
                goto suspend_exit;
        dd = spi_master_get_devdata(master);
        if (!dd)
                goto suspend_exit;

        /* Make sure nothing is added to the queue while we're suspending */
        spin_lock_irqsave(&dd->queue_lock, flags);
        dd->suspended = 1;
        spin_unlock_irqrestore(&dd->queue_lock, flags);

        /* Wait for pending transactions to end, or for the wait to be
         * interrupted */
        wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending);
        msm_spi_free_gpios(dd);

suspend_exit:
        return 0;
}

static int msm_spi_resume(struct platform_device *pdev)
{
        struct spi_master *master = platform_get_drvdata(pdev);
        struct msm_spi *dd;

        if (!master)
                goto resume_exit;
        dd = spi_master_get_devdata(master);
        if (!dd)
                goto resume_exit;

        BUG_ON(msm_spi_request_gpios(dd) != 0);
        dd->suspended = 0;
resume_exit:
        return 0;
}
#else
#define msm_spi_suspend NULL
#define msm_spi_resume NULL
#endif /* CONFIG_PM */

static int __devexit msm_spi_remove(struct platform_device *pdev)
{
        struct spi_master *master = platform_get_drvdata(pdev);
        struct msm_spi *dd = spi_master_get_devdata(master);
        struct msm_spi_platform_data *pdata = pdev->dev.platform_data;

        pm_qos_remove_request(&qos_req_list);
        spi_debugfs_exit(dd);
        sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);

        msm_spi_teardown_dma(dd);
        if (pdata && pdata->gpio_release)
                pdata->gpio_release();

        msm_spi_free_gpios(dd);
        clk_put(dd->clk);
        clk_put(dd->pclk);
        destroy_workqueue(dd->workqueue);
        platform_set_drvdata(pdev, NULL);
        spi_unregister_master(master);
        spi_master_put(master);

        return 0;
}

static const struct of_device_id msm_spi_dt_match[] = {
        {
                .compatible = "qcom,spi-qup-v2",
        },
        {}
};

static struct platform_driver msm_spi_driver = {
        .driver = {
                .name = SPI_DRV_NAME,
                .owner = THIS_MODULE,
                .of_match_table = msm_spi_dt_match,
        },
        .suspend = msm_spi_suspend,
        .resume = msm_spi_resume,
        .remove = __devexit_p(msm_spi_remove),
};

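/*
 * msm_spi_probe() is __init, so registration goes through
 * platform_driver_probe(): the probe routine runs once for devices already
 * registered at init time and is then discarded, which is why msm_spi_driver
 * above has no .probe field.
 */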
static int __init msm_spi_init(void)
{
        return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
}
module_init(msm_spi_init);

static void __exit msm_spi_exit(void)
{
        platform_driver_unregister(&msm_spi_driver);
}
module_exit(msm_spi_exit);

MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.4");
MODULE_ALIAS("platform:"SPI_DRV_NAME);