Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13/*
14 * SPI driver for Qualcomm MSM platforms
15 *
16 */
17#include <linux/version.h>
18#include <linux/kernel.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070019#include <linux/module.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070020#include <linux/init.h>
21#include <linux/spinlock.h>
22#include <linux/list.h>
23#include <linux/irq.h>
24#include <linux/platform_device.h>
25#include <linux/spi/spi.h>
26#include <linux/interrupt.h>
27#include <linux/err.h>
28#include <linux/clk.h>
29#include <linux/delay.h>
30#include <linux/workqueue.h>
31#include <linux/io.h>
32#include <linux/debugfs.h>
33#include <mach/msm_spi.h>
34#include <linux/dma-mapping.h>
35#include <linux/sched.h>
36#include <mach/dma.h>
37#include <asm/atomic.h>
38#include <linux/mutex.h>
39#include <linux/gpio.h>
40#include <linux/remote_spinlock.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070041#include <linux/pm_qos.h>
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070042#include <linux/of.h>
Sathish Ambleycd06bf32012-04-09 11:59:43 -070043#include <linux/of_gpio.h>
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070044#include "spi_qsd.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070045
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070046static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
47 struct platform_device *pdev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070048{
49 struct resource *resource;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070050 unsigned long gsbi_mem_phys_addr;
51 size_t gsbi_mem_size;
52 void __iomem *gsbi_base;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070053
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070054 resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070055 if (!resource)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070056 return 0;
57
58 gsbi_mem_phys_addr = resource->start;
59 gsbi_mem_size = resource_size(resource);
60 if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
61 gsbi_mem_size, SPI_DRV_NAME))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062 return -ENXIO;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070063
64 gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
65 gsbi_mem_size);
66 if (!gsbi_base)
67 return -ENXIO;
68
69 /* Set GSBI to SPI mode */
70 writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071
72 return 0;
73}
74
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070075static inline void msm_spi_register_init(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070076{
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070077 writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
78 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
79 writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
80 writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
81 writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
82 if (dd->qup_ver)
83 writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070084}
85
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070086static inline int msm_spi_request_gpios(struct msm_spi *dd)
87{
88 int i;
89 int result = 0;
90
91 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
92 if (dd->spi_gpios[i] >= 0) {
93 result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
94 if (result) {
Harini Jayaramane4c06192011-09-28 16:26:39 -060095 dev_err(dd->dev, "%s: gpio_request for pin %d "
96 "failed with error %d\n", __func__,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070097 dd->spi_gpios[i], result);
98 goto error;
99 }
100 }
101 }
102 return 0;
103
104error:
105 for (; --i >= 0;) {
106 if (dd->spi_gpios[i] >= 0)
107 gpio_free(dd->spi_gpios[i]);
108 }
109 return result;
110}
111
112static inline void msm_spi_free_gpios(struct msm_spi *dd)
113{
114 int i;
115
116 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
117 if (dd->spi_gpios[i] >= 0)
118 gpio_free(dd->spi_gpios[i]);
119 }
Harini Jayaramane4c06192011-09-28 16:26:39 -0600120
121 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
122 if (dd->cs_gpios[i].valid) {
123 gpio_free(dd->cs_gpios[i].gpio_num);
124 dd->cs_gpios[i].valid = 0;
125 }
126 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700127}
128
129static void msm_spi_clock_set(struct msm_spi *dd, int speed)
130{
131 int rc;
132
133 rc = clk_set_rate(dd->clk, speed);
134 if (!rc)
135 dd->clock_speed = speed;
136}
137
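/*
 * Decodes the block-size and FIFO-size fields read from SPI_IO_MODES.
 * Worked example: block == 1 selects 4-word (16-byte) blocks, and with
 * mult == 2 the FIFO is 4 * 8 = 32 words deep; *block_size is 16 bytes.
 */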
138static int msm_spi_calculate_size(int *fifo_size,
139 int *block_size,
140 int block,
141 int mult)
142{
143 int words;
144
145 switch (block) {
146 case 0:
147 words = 1; /* 4 bytes */
148 break;
149 case 1:
150 words = 4; /* 16 bytes */
151 break;
152 case 2:
153 words = 8; /* 32 bytes */
154 break;
155 default:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700156 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700157 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700158
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700159 switch (mult) {
160 case 0:
161 *fifo_size = words * 2;
162 break;
163 case 1:
164 *fifo_size = words * 4;
165 break;
166 case 2:
167 *fifo_size = words * 8;
168 break;
169 case 3:
170 *fifo_size = words * 16;
171 break;
172 default:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700173 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700174 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700175
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700176 *block_size = words * sizeof(u32); /* in bytes */
177 return 0;
178}
179
180static void get_next_transfer(struct msm_spi *dd)
181{
182 struct spi_transfer *t = dd->cur_transfer;
183
184 if (t->transfer_list.next != &dd->cur_msg->transfers) {
185 dd->cur_transfer = list_entry(t->transfer_list.next,
186 struct spi_transfer,
187 transfer_list);
188 dd->write_buf = dd->cur_transfer->tx_buf;
189 dd->read_buf = dd->cur_transfer->rx_buf;
190 }
191}
192
193static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
194{
195 u32 spi_iom;
196 int block;
197 int mult;
198
199 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
200
201 block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
202 mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
203 if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
204 block, mult)) {
205 goto fifo_size_err;
206 }
207
208 block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
209 mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
210 if (msm_spi_calculate_size(&dd->output_fifo_size,
211 &dd->output_block_size, block, mult)) {
212 goto fifo_size_err;
213 }
214 /* DM mode is not available for this block size */
215 if (dd->input_block_size == 4 || dd->output_block_size == 4)
216 dd->use_dma = 0;
217
218 /* DM mode is currently unsupported for different block sizes */
219 if (dd->input_block_size != dd->output_block_size)
220 dd->use_dma = 0;
221
222 if (dd->use_dma)
223 dd->burst_size = max(dd->input_block_size, DM_BURST_SIZE);
224
225 return;
226
227fifo_size_err:
228 dd->use_dma = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700229 pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700230 return;
231}
232
233static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
234{
235 u32 data_in;
236 int i;
237 int shift;
238
239 data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
240 if (dd->read_buf) {
241 for (i = 0; (i < dd->bytes_per_word) &&
242 dd->rx_bytes_remaining; i++) {
243 /* The data format depends on bytes_per_word:
244 4 bytes: 0x12345678
245 3 bytes: 0x00123456
246 2 bytes: 0x00001234
247 1 byte : 0x00000012
248 */
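			/* e.g. bytes_per_word == 3 and data_in == 0x00123456:
			   the bytes copied to read_buf are 0x12, 0x34, 0x56 */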
249 shift = 8 * (dd->bytes_per_word - i - 1);
250 *dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
251 dd->rx_bytes_remaining--;
252 }
253 } else {
254 if (dd->rx_bytes_remaining >= dd->bytes_per_word)
255 dd->rx_bytes_remaining -= dd->bytes_per_word;
256 else
257 dd->rx_bytes_remaining = 0;
258 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700259
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700260 dd->read_xfr_cnt++;
261 if (dd->multi_xfr) {
262 if (!dd->rx_bytes_remaining)
263 dd->read_xfr_cnt = 0;
264 else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
265 dd->read_len) {
266 struct spi_transfer *t = dd->cur_rx_transfer;
267 if (t->transfer_list.next != &dd->cur_msg->transfers) {
268 t = list_entry(t->transfer_list.next,
269 struct spi_transfer,
270 transfer_list);
271 dd->read_buf = t->rx_buf;
272 dd->read_len = t->len;
273 dd->read_xfr_cnt = 0;
274 dd->cur_rx_transfer = t;
275 }
276 }
277 }
278}
279
280static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
281{
282 u32 spi_op = readl_relaxed(dd->base + SPI_STATE);
283
284 return spi_op & SPI_OP_STATE_VALID;
285}
286
287static inline int msm_spi_wait_valid(struct msm_spi *dd)
288{
289 unsigned long delay = 0;
290 unsigned long timeout = 0;
291
292 if (dd->clock_speed == 0)
293 return -EINVAL;
294 /*
295 * Based on the SPI clock speed, sufficient time
296 * should be given for the SPI state transition
297 * to occur
298 */
299 delay = (10 * USEC_PER_SEC) / dd->clock_speed;
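	/* e.g. a 10 MHz SPI clock yields a 1 us delay (10 clock periods) */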
300 /*
301 * For small delay values, the default timeout would
302 * be one jiffy
303 */
304 if (delay < SPI_DELAY_THRESHOLD)
305 delay = SPI_DELAY_THRESHOLD;
Harini Jayaramanbcef7732011-09-01 12:12:58 -0600306
307 /* Adding one to round off to the nearest jiffy */
308 timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700309 while (!msm_spi_is_valid_state(dd)) {
310 if (time_after(jiffies, timeout)) {
Harini Jayaramanbcef7732011-09-01 12:12:58 -0600311 if (!msm_spi_is_valid_state(dd)) {
312 if (dd->cur_msg)
313 dd->cur_msg->status = -EIO;
  314				dev_err(dd->dev, "%s: SPI operational state "
  315					"not valid\n", __func__);
316 return -ETIMEDOUT;
317 } else
318 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700319 }
320 /*
321 * For smaller values of delay, context switch time
322 * would negate the usage of usleep
323 */
324 if (delay > 20)
325 usleep(delay);
326 else if (delay)
327 udelay(delay);
328 }
329 return 0;
330}
331
332static inline int msm_spi_set_state(struct msm_spi *dd,
333 enum msm_spi_state state)
334{
335 enum msm_spi_state cur_state;
336 if (msm_spi_wait_valid(dd))
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700337 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700338 cur_state = readl_relaxed(dd->base + SPI_STATE);
339 /* Per spec:
340 For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
341 if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
342 (state == SPI_OP_STATE_RESET)) {
343 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
344 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
345 } else {
346 writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
347 dd->base + SPI_STATE);
348 }
349 if (msm_spi_wait_valid(dd))
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700350 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700351
352 return 0;
353}
354
355static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n)
356{
357 *config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);
358
359 if (n != (*config & SPI_CFG_N))
360 *config = (*config & ~SPI_CFG_N) | n;
361
362 if ((dd->mode == SPI_DMOV_MODE) && (!dd->read_len)) {
363 if (dd->read_buf == NULL)
364 *config |= SPI_NO_INPUT;
365 if (dd->write_buf == NULL)
366 *config |= SPI_NO_OUTPUT;
367 }
368}
369
370static void msm_spi_set_config(struct msm_spi *dd, int bpw)
371{
372 u32 spi_config;
373
374 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
375
376 if (dd->cur_msg->spi->mode & SPI_CPHA)
377 spi_config &= ~SPI_CFG_INPUT_FIRST;
378 else
379 spi_config |= SPI_CFG_INPUT_FIRST;
380 if (dd->cur_msg->spi->mode & SPI_LOOP)
381 spi_config |= SPI_CFG_LOOPBACK;
382 else
383 spi_config &= ~SPI_CFG_LOOPBACK;
384 msm_spi_add_configs(dd, &spi_config, bpw-1);
385 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
386 msm_spi_set_qup_config(dd, bpw);
387}
388
389static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
390{
391 dmov_box *box;
392 int bytes_to_send, num_rows, bytes_sent;
393 u32 num_transfers;
394
395 atomic_set(&dd->rx_irq_called, 0);
Kiran Gunda54eb06e2012-05-18 15:17:06 +0530396 atomic_set(&dd->tx_irq_called, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700397 if (dd->write_len && !dd->read_len) {
398 /* WR-WR transfer */
399 bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
400 dd->write_buf = dd->temp_buf;
401 } else {
402 bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
403 /* For WR-RD transfer, bytes_sent can be negative */
404 if (bytes_sent < 0)
405 bytes_sent = 0;
406 }
407
Kiran Gundae8f16742012-06-27 10:06:32 +0530408 /* We'll send in chunks of SPI_MAX_LEN if larger than
  409	 * 4K bytes for targets that don't support infinite
410 * mode. Make sure this doesn't happen on targets that
411 * support infinite mode.
412 */
413 if (!dd->pdata->infinite_mode)
414 bytes_to_send = dd->tx_bytes_remaining / SPI_MAX_LEN ?
415 SPI_MAX_LEN : dd->tx_bytes_remaining;
416 else
417 bytes_to_send = dd->tx_bytes_remaining;
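	/* i.e. bytes_to_send = min(tx_bytes_remaining, SPI_MAX_LEN) unless
	 * the target supports infinite mode */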
418
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700419 num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
420 dd->unaligned_len = bytes_to_send % dd->burst_size;
421 num_rows = bytes_to_send / dd->burst_size;
422
423 dd->mode = SPI_DMOV_MODE;
424
425 if (num_rows) {
426 /* src in 16 MSB, dst in 16 LSB */
427 box = &dd->tx_dmov_cmd->box;
428 box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
429 box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
430 box->num_rows = (num_rows << 16) | num_rows;
431 box->row_offset = (dd->burst_size << 16) | 0;
432
433 box = &dd->rx_dmov_cmd->box;
434 box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
435 box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
436 box->num_rows = (num_rows << 16) | num_rows;
437 box->row_offset = (0 << 16) | dd->burst_size;
438
439 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
440 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
441 offsetof(struct spi_dmov_cmd, box));
442 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
443 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
444 offsetof(struct spi_dmov_cmd, box));
445 } else {
446 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
447 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
448 offsetof(struct spi_dmov_cmd, single_pad));
449 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
450 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
451 offsetof(struct spi_dmov_cmd, single_pad));
452 }
453
454 if (!dd->unaligned_len) {
455 dd->tx_dmov_cmd->box.cmd |= CMD_LC;
456 dd->rx_dmov_cmd->box.cmd |= CMD_LC;
457 } else {
458 dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
459 dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
460 u32 offset = dd->cur_transfer->len - dd->unaligned_len;
461
462 if ((dd->multi_xfr) && (dd->read_len <= 0))
463 offset = dd->cur_msg_len - dd->unaligned_len;
464
465 dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
466 dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;
467
468 memset(dd->tx_padding, 0, dd->burst_size);
469 memset(dd->rx_padding, 0, dd->burst_size);
470 if (dd->write_buf)
471 memcpy(dd->tx_padding, dd->write_buf + offset,
472 dd->unaligned_len);
473
474 tx_cmd->src = dd->tx_padding_dma;
475 rx_cmd->dst = dd->rx_padding_dma;
476 tx_cmd->len = rx_cmd->len = dd->burst_size;
477 }
  478	/* This also takes care of the dummy padding buffer.
  479	   Since the count is set to the correct length, the
  480	   dummy bytes won't actually be sent */
481 if (dd->multi_xfr) {
482 u32 write_transfers = 0;
483 u32 read_transfers = 0;
484
485 if (dd->write_len > 0) {
486 write_transfers = DIV_ROUND_UP(dd->write_len,
487 dd->bytes_per_word);
488 writel_relaxed(write_transfers,
489 dd->base + SPI_MX_OUTPUT_COUNT);
490 }
491 if (dd->read_len > 0) {
492 /*
493 * The read following a write transfer must take
494 * into account, that the bytes pertaining to
495 * the write transfer needs to be discarded,
496 * before the actual read begins.
497 */
498 read_transfers = DIV_ROUND_UP(dd->read_len +
499 dd->write_len,
500 dd->bytes_per_word);
501 writel_relaxed(read_transfers,
502 dd->base + SPI_MX_INPUT_COUNT);
503 }
504 } else {
505 if (dd->write_buf)
506 writel_relaxed(num_transfers,
507 dd->base + SPI_MX_OUTPUT_COUNT);
508 if (dd->read_buf)
509 writel_relaxed(num_transfers,
510 dd->base + SPI_MX_INPUT_COUNT);
511 }
512}
513
514static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
515{
516 dma_coherent_pre_ops();
517 if (dd->write_buf)
518 msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
519 if (dd->read_buf)
520 msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
521}
522
Kiran Gundae8f16742012-06-27 10:06:32 +0530523/* The SPI core on targets that do not support infinite mode can send a maximum
  524   of 4K bytes per transfer. Therefore, we send several chunks of 3K or less
  525   (depending on how much is left). Upon completion we send the next chunk,
  526   or complete the transfer if everything is finished. On targets that support
  527   infinite mode, we send all the bytes in a single chunk.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700528*/
529static int msm_spi_dm_send_next(struct msm_spi *dd)
530{
  531	/* By now we should have sent all the bytes in FIFO mode;
  532	 * however, to be safe, we'll check anyway.
533 */
534 if (dd->mode != SPI_DMOV_MODE)
535 return 0;
536
Kiran Gundae8f16742012-06-27 10:06:32 +0530537	/* On targets which do not support infinite mode,
  538	   we need to send more chunks if we sent the maximum last time */
539 if ((!dd->pdata->infinite_mode) &&
540 (dd->tx_bytes_remaining > SPI_MAX_LEN)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700541 dd->tx_bytes_remaining -= SPI_MAX_LEN;
542 if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
543 return 0;
544 dd->read_len = dd->write_len = 0;
545 msm_spi_setup_dm_transfer(dd);
546 msm_spi_enqueue_dm_commands(dd);
547 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
548 return 0;
549 return 1;
550 } else if (dd->read_len && dd->write_len) {
551 dd->tx_bytes_remaining -= dd->cur_transfer->len;
552 if (list_is_last(&dd->cur_transfer->transfer_list,
553 &dd->cur_msg->transfers))
554 return 0;
555 get_next_transfer(dd);
556 if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
557 return 0;
558 dd->tx_bytes_remaining = dd->read_len + dd->write_len;
559 dd->read_buf = dd->temp_buf;
560 dd->read_len = dd->write_len = -1;
561 msm_spi_setup_dm_transfer(dd);
562 msm_spi_enqueue_dm_commands(dd);
563 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
564 return 0;
565 return 1;
566 }
567 return 0;
568}
569
570static inline void msm_spi_ack_transfer(struct msm_spi *dd)
571{
572 writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
573 SPI_OP_MAX_OUTPUT_DONE_FLAG,
574 dd->base + SPI_OPERATIONAL);
575 /* Ensure done flag was cleared before proceeding further */
576 mb();
577}
578
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700579/* Figure out which IRQ occurred and call the relevant handlers */
580static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
581{
582 u32 op, ret = IRQ_NONE;
583 struct msm_spi *dd = dev_id;
584
585 if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
586 readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
587 struct spi_master *master = dev_get_drvdata(dd->dev);
588 ret |= msm_spi_error_irq(irq, master);
589 }
590
591 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
592 if (op & SPI_OP_INPUT_SERVICE_FLAG) {
593 writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
594 dd->base + SPI_OPERATIONAL);
595 /*
596 * Ensure service flag was cleared before further
597 * processing of interrupt.
598 */
599 mb();
600 ret |= msm_spi_input_irq(irq, dev_id);
601 }
602
603 if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
604 writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
605 dd->base + SPI_OPERATIONAL);
606 /*
607 * Ensure service flag was cleared before further
608 * processing of interrupt.
609 */
610 mb();
611 ret |= msm_spi_output_irq(irq, dev_id);
612 }
613
614 if (dd->done) {
615 complete(&dd->transfer_complete);
616 dd->done = 0;
617 }
618 return ret;
619}
620
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700621static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
622{
623 struct msm_spi *dd = dev_id;
624
625 dd->stat_rx++;
626
627 if (dd->mode == SPI_MODE_NONE)
628 return IRQ_HANDLED;
629
630 if (dd->mode == SPI_DMOV_MODE) {
631 u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
632 if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
633 (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
634 msm_spi_ack_transfer(dd);
635 if (dd->unaligned_len == 0) {
636 if (atomic_inc_return(&dd->rx_irq_called) == 1)
637 return IRQ_HANDLED;
638 }
639 msm_spi_complete(dd);
640 return IRQ_HANDLED;
641 }
642 return IRQ_NONE;
643 }
644
645 if (dd->mode == SPI_FIFO_MODE) {
646 while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
647 SPI_OP_IP_FIFO_NOT_EMPTY) &&
648 (dd->rx_bytes_remaining > 0)) {
649 msm_spi_read_word_from_fifo(dd);
650 }
651 if (dd->rx_bytes_remaining == 0)
652 msm_spi_complete(dd);
653 }
654
655 return IRQ_HANDLED;
656}
657
658static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
659{
660 u32 word;
661 u8 byte;
662 int i;
663
664 word = 0;
665 if (dd->write_buf) {
666 for (i = 0; (i < dd->bytes_per_word) &&
667 dd->tx_bytes_remaining; i++) {
668 dd->tx_bytes_remaining--;
669 byte = *dd->write_buf++;
670 word |= (byte << (BITS_PER_BYTE * (3 - i)));
671 }
672 } else
673 if (dd->tx_bytes_remaining > dd->bytes_per_word)
674 dd->tx_bytes_remaining -= dd->bytes_per_word;
675 else
676 dd->tx_bytes_remaining = 0;
677 dd->write_xfr_cnt++;
678 if (dd->multi_xfr) {
679 if (!dd->tx_bytes_remaining)
680 dd->write_xfr_cnt = 0;
681 else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
682 dd->write_len) {
683 struct spi_transfer *t = dd->cur_tx_transfer;
684 if (t->transfer_list.next != &dd->cur_msg->transfers) {
685 t = list_entry(t->transfer_list.next,
686 struct spi_transfer,
687 transfer_list);
688 dd->write_buf = t->tx_buf;
689 dd->write_len = t->len;
690 dd->write_xfr_cnt = 0;
691 dd->cur_tx_transfer = t;
692 }
693 }
694 }
695 writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
696}
697
698static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
699{
700 int count = 0;
701
702 while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
703 !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
704 SPI_OP_OUTPUT_FIFO_FULL)) {
705 msm_spi_write_word_to_fifo(dd);
706 count++;
707 }
708}
709
710static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
711{
712 struct msm_spi *dd = dev_id;
713
714 dd->stat_tx++;
715
716 if (dd->mode == SPI_MODE_NONE)
717 return IRQ_HANDLED;
718
719 if (dd->mode == SPI_DMOV_MODE) {
  720		/* A TX-only transaction is handled here.
  721		   This is the only place we signal completion on TX rather than RX */
722 if (dd->read_buf == NULL &&
723 readl_relaxed(dd->base + SPI_OPERATIONAL) &
724 SPI_OP_MAX_OUTPUT_DONE_FLAG) {
725 msm_spi_ack_transfer(dd);
Kiran Gunda54eb06e2012-05-18 15:17:06 +0530726 if (atomic_inc_return(&dd->tx_irq_called) == 1)
727 return IRQ_HANDLED;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700728 msm_spi_complete(dd);
729 return IRQ_HANDLED;
730 }
731 return IRQ_NONE;
732 }
733
734 /* Output FIFO is empty. Transmit any outstanding write data. */
735 if (dd->mode == SPI_FIFO_MODE)
736 msm_spi_write_rmn_to_fifo(dd);
737
738 return IRQ_HANDLED;
739}
740
741static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
742{
743 struct spi_master *master = dev_id;
744 struct msm_spi *dd = spi_master_get_devdata(master);
745 u32 spi_err;
746
747 spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
748 if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
749 dev_warn(master->dev.parent, "SPI output overrun error\n");
750 if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
751 dev_warn(master->dev.parent, "SPI input underrun error\n");
752 if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
753 dev_warn(master->dev.parent, "SPI output underrun error\n");
754 msm_spi_get_clk_err(dd, &spi_err);
755 if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
756 dev_warn(master->dev.parent, "SPI clock overrun error\n");
757 if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
758 dev_warn(master->dev.parent, "SPI clock underrun error\n");
759 msm_spi_clear_error_flags(dd);
760 msm_spi_ack_clk_err(dd);
761 /* Ensure clearing of QUP_ERROR_FLAGS was completed */
762 mb();
763 return IRQ_HANDLED;
764}
765
766static int msm_spi_map_dma_buffers(struct msm_spi *dd)
767{
768 struct device *dev;
769 struct spi_transfer *first_xfr;
Jordan Crouse47b3f832011-09-19 11:21:16 -0600770 struct spi_transfer *nxt_xfr = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700771 void *tx_buf, *rx_buf;
772 unsigned tx_len, rx_len;
773 int ret = -EINVAL;
774
775 dev = &dd->cur_msg->spi->dev;
776 first_xfr = dd->cur_transfer;
777 tx_buf = (void *)first_xfr->tx_buf;
778 rx_buf = first_xfr->rx_buf;
779 tx_len = rx_len = first_xfr->len;
780
781 /*
782 * For WR-WR and WR-RD transfers, we allocate our own temporary
783 * buffer and copy the data to/from the client buffers.
784 */
785 if (dd->multi_xfr) {
786 dd->temp_buf = kzalloc(dd->cur_msg_len,
787 GFP_KERNEL | __GFP_DMA);
788 if (!dd->temp_buf)
789 return -ENOMEM;
790 nxt_xfr = list_entry(first_xfr->transfer_list.next,
791 struct spi_transfer, transfer_list);
792
793 if (dd->write_len && !dd->read_len) {
794 if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
795 goto error;
796
797 memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
798 memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
799 nxt_xfr->len);
800 tx_buf = dd->temp_buf;
801 tx_len = dd->cur_msg_len;
802 } else {
803 if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
804 goto error;
805
806 rx_buf = dd->temp_buf;
807 rx_len = dd->cur_msg_len;
808 }
809 }
810 if (tx_buf != NULL) {
811 first_xfr->tx_dma = dma_map_single(dev, tx_buf,
812 tx_len, DMA_TO_DEVICE);
813 if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
814 dev_err(dev, "dma %cX %d bytes error\n",
815 'T', tx_len);
816 ret = -ENOMEM;
817 goto error;
818 }
819 }
820 if (rx_buf != NULL) {
821 dma_addr_t dma_handle;
822 dma_handle = dma_map_single(dev, rx_buf,
823 rx_len, DMA_FROM_DEVICE);
824 if (dma_mapping_error(NULL, dma_handle)) {
825 dev_err(dev, "dma %cX %d bytes error\n",
826 'R', rx_len);
827 if (tx_buf != NULL)
828 dma_unmap_single(NULL, first_xfr->tx_dma,
829 tx_len, DMA_TO_DEVICE);
830 ret = -ENOMEM;
831 goto error;
832 }
833 if (dd->multi_xfr)
834 nxt_xfr->rx_dma = dma_handle;
835 else
836 first_xfr->rx_dma = dma_handle;
837 }
838 return 0;
839
840error:
841 kfree(dd->temp_buf);
842 dd->temp_buf = NULL;
843 return ret;
844}
845
846static void msm_spi_unmap_dma_buffers(struct msm_spi *dd)
847{
848 struct device *dev;
849 u32 offset;
850
851 dev = &dd->cur_msg->spi->dev;
852 if (dd->cur_msg->is_dma_mapped)
853 goto unmap_end;
854
855 if (dd->multi_xfr) {
856 if (dd->write_len && !dd->read_len) {
857 dma_unmap_single(dev,
858 dd->cur_transfer->tx_dma,
859 dd->cur_msg_len,
860 DMA_TO_DEVICE);
861 } else {
862 struct spi_transfer *prev_xfr;
863 prev_xfr = list_entry(
864 dd->cur_transfer->transfer_list.prev,
865 struct spi_transfer,
866 transfer_list);
867 if (dd->cur_transfer->rx_buf) {
868 dma_unmap_single(dev,
869 dd->cur_transfer->rx_dma,
870 dd->cur_msg_len,
871 DMA_FROM_DEVICE);
872 }
873 if (prev_xfr->tx_buf) {
874 dma_unmap_single(dev,
875 prev_xfr->tx_dma,
876 prev_xfr->len,
877 DMA_TO_DEVICE);
878 }
879 if (dd->unaligned_len && dd->read_buf) {
880 offset = dd->cur_msg_len - dd->unaligned_len;
881 dma_coherent_post_ops();
882 memcpy(dd->read_buf + offset, dd->rx_padding,
883 dd->unaligned_len);
884 memcpy(dd->cur_transfer->rx_buf,
885 dd->read_buf + prev_xfr->len,
886 dd->cur_transfer->len);
887 }
888 }
889 kfree(dd->temp_buf);
890 dd->temp_buf = NULL;
891 return;
892 } else {
893 if (dd->cur_transfer->rx_buf)
894 dma_unmap_single(dev, dd->cur_transfer->rx_dma,
895 dd->cur_transfer->len,
896 DMA_FROM_DEVICE);
897 if (dd->cur_transfer->tx_buf)
898 dma_unmap_single(dev, dd->cur_transfer->tx_dma,
899 dd->cur_transfer->len,
900 DMA_TO_DEVICE);
901 }
902
903unmap_end:
904 /* If we padded the transfer, we copy it from the padding buf */
905 if (dd->unaligned_len && dd->read_buf) {
906 offset = dd->cur_transfer->len - dd->unaligned_len;
907 dma_coherent_post_ops();
908 memcpy(dd->read_buf + offset, dd->rx_padding,
909 dd->unaligned_len);
910 }
911}
912
913/**
914 * msm_use_dm - decides whether to use data mover for this
915 * transfer
916 * @dd: device
917 * @tr: transfer
918 *
919 * Start using DM if:
  920 * 1. The transfer is longer than 3 * block size.
  921 * 2. The buffers are aligned to the cache line size.
  922 * 3. For WR-RD or WR-WR transfers, conditions (1) and (2) above are met.
923 */
924static inline int msm_use_dm(struct msm_spi *dd, struct spi_transfer *tr,
925 u8 bpw)
926{
927 u32 cache_line = dma_get_cache_alignment();
928
929 if (!dd->use_dma)
930 return 0;
931
932 if (dd->cur_msg_len < 3*dd->input_block_size)
933 return 0;
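	/* e.g. with a 16-byte input block, transfers shorter than 48 bytes
	 * stay in FIFO mode */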
934
935 if (dd->multi_xfr && !dd->read_len && !dd->write_len)
936 return 0;
937
938 if (tr->tx_buf) {
939 if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
940 return 0;
941 }
942 if (tr->rx_buf) {
943 if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
944 return 0;
945 }
946
947 if (tr->cs_change &&
  948	    ((bpw != 8) && (bpw != 16) && (bpw != 32)))
949 return 0;
950 return 1;
951}
952
953static void msm_spi_process_transfer(struct msm_spi *dd)
954{
955 u8 bpw;
956 u32 spi_ioc;
957 u32 spi_iom;
958 u32 spi_ioc_orig;
959 u32 max_speed;
960 u32 chip_select;
961 u32 read_count;
962 u32 timeout;
963 u32 int_loopback = 0;
964
965 dd->tx_bytes_remaining = dd->cur_msg_len;
966 dd->rx_bytes_remaining = dd->cur_msg_len;
967 dd->read_buf = dd->cur_transfer->rx_buf;
968 dd->write_buf = dd->cur_transfer->tx_buf;
969 init_completion(&dd->transfer_complete);
970 if (dd->cur_transfer->bits_per_word)
971 bpw = dd->cur_transfer->bits_per_word;
972 else
973 if (dd->cur_msg->spi->bits_per_word)
974 bpw = dd->cur_msg->spi->bits_per_word;
975 else
976 bpw = 8;
977 dd->bytes_per_word = (bpw + 7) / 8;
978
979 if (dd->cur_transfer->speed_hz)
980 max_speed = dd->cur_transfer->speed_hz;
981 else
982 max_speed = dd->cur_msg->spi->max_speed_hz;
983 if (!dd->clock_speed || max_speed != dd->clock_speed)
984 msm_spi_clock_set(dd, max_speed);
985
986 read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
987 if (dd->cur_msg->spi->mode & SPI_LOOP)
988 int_loopback = 1;
989 if (int_loopback && dd->multi_xfr &&
990 (read_count > dd->input_fifo_size)) {
991 if (dd->read_len && dd->write_len)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700992 pr_err(
  993			"%s: Internal loopback does not support > fifo size"
  994			" for write-then-read transactions\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700995 __func__);
996 else if (dd->write_len && !dd->read_len)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700997 pr_err(
  998			"%s: Internal loopback does not support > fifo size"
  999			" for write-then-write transactions\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001000 __func__);
1001 return;
1002 }
1003 if (!msm_use_dm(dd, dd->cur_transfer, bpw)) {
1004 dd->mode = SPI_FIFO_MODE;
1005 if (dd->multi_xfr) {
1006 dd->read_len = dd->cur_transfer->len;
1007 dd->write_len = dd->cur_transfer->len;
1008 }
1009 /* read_count cannot exceed fifo_size, and only one READ COUNT
1010 interrupt is generated per transaction, so for transactions
1011 larger than fifo size READ COUNT must be disabled.
1012 For those transactions we usually move to Data Mover mode.
1013 */
1014 if (read_count <= dd->input_fifo_size) {
1015 writel_relaxed(read_count,
1016 dd->base + SPI_MX_READ_COUNT);
1017 msm_spi_set_write_count(dd, read_count);
1018 } else {
1019 writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
1020 msm_spi_set_write_count(dd, 0);
1021 }
1022 } else {
1023 dd->mode = SPI_DMOV_MODE;
1024 if (dd->write_len && dd->read_len) {
1025 dd->tx_bytes_remaining = dd->write_len;
1026 dd->rx_bytes_remaining = dd->read_len;
1027 }
1028 }
1029
1030 /* Write mode - fifo or data mover*/
1031 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
1032 spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
1033 spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
1034 spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
1035 /* Turn on packing for data mover */
1036 if (dd->mode == SPI_DMOV_MODE)
1037 spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
1038 else
1039 spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
1040 writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
1041
1042 msm_spi_set_config(dd, bpw);
1043
1044 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1045 spi_ioc_orig = spi_ioc;
1046 if (dd->cur_msg->spi->mode & SPI_CPOL)
1047 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1048 else
1049 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1050 chip_select = dd->cur_msg->spi->chip_select << 2;
1051 if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
1052 spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
1053 if (!dd->cur_transfer->cs_change)
1054 spi_ioc |= SPI_IO_C_MX_CS_MODE;
1055 if (spi_ioc != spi_ioc_orig)
1056 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1057
1058 if (dd->mode == SPI_DMOV_MODE) {
1059 msm_spi_setup_dm_transfer(dd);
1060 msm_spi_enqueue_dm_commands(dd);
1061 }
1062 /* The output fifo interrupt handler will handle all writes after
1063 the first. Restricting this to one write avoids contention
1064 issues and race conditions between this thread and the int handler
1065 */
1066 else if (dd->mode == SPI_FIFO_MODE) {
1067 if (msm_spi_prepare_for_write(dd))
1068 goto transfer_end;
1069 msm_spi_start_write(dd, read_count);
1070 }
1071
1072 /* Only enter the RUN state after the first word is written into
1073 the output FIFO. Otherwise, the output FIFO EMPTY interrupt
1074 might fire before the first word is written resulting in a
1075 possible race condition.
1076 */
1077 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
1078 goto transfer_end;
1079
1080 timeout = 100 * msecs_to_jiffies(
1081 DIV_ROUND_UP(dd->cur_msg_len * 8,
1082 DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
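	/* i.e. roughly 100x the nominal transfer time: cur_msg_len * 8 bits
	 * sent at max_speed bits/sec, converted to jiffies */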
1083
1084 /* Assume success, this might change later upon transaction result */
1085 dd->cur_msg->status = 0;
1086 do {
1087 if (!wait_for_completion_timeout(&dd->transfer_complete,
1088 timeout)) {
1089 dev_err(dd->dev, "%s: SPI transaction "
1090 "timeout\n", __func__);
1091 dd->cur_msg->status = -EIO;
1092 if (dd->mode == SPI_DMOV_MODE) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001093 msm_dmov_flush(dd->tx_dma_chan, 1);
1094 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001095 }
1096 break;
1097 }
1098 } while (msm_spi_dm_send_next(dd));
1099
1100transfer_end:
1101 if (dd->mode == SPI_DMOV_MODE)
1102 msm_spi_unmap_dma_buffers(dd);
1103 dd->mode = SPI_MODE_NONE;
1104
1105 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
1106 writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
1107 dd->base + SPI_IO_CONTROL);
1108}
1109
1110static void get_transfer_length(struct msm_spi *dd)
1111{
1112 struct spi_transfer *tr;
1113 int num_xfrs = 0;
1114 int readlen = 0;
1115 int writelen = 0;
1116
1117 dd->cur_msg_len = 0;
1118 dd->multi_xfr = 0;
1119 dd->read_len = dd->write_len = 0;
1120
1121 list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
1122 if (tr->tx_buf)
1123 writelen += tr->len;
1124 if (tr->rx_buf)
1125 readlen += tr->len;
1126 dd->cur_msg_len += tr->len;
1127 num_xfrs++;
1128 }
1129
1130 if (num_xfrs == 2) {
1131 struct spi_transfer *first_xfr = dd->cur_transfer;
1132
1133 dd->multi_xfr = 1;
1134 tr = list_entry(first_xfr->transfer_list.next,
1135 struct spi_transfer,
1136 transfer_list);
1137 /*
1138 * We update dd->read_len and dd->write_len only
1139 * for WR-WR and WR-RD transfers.
1140 */
1141 if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
1142 if (((tr->tx_buf) && (!tr->rx_buf)) ||
1143 ((!tr->tx_buf) && (tr->rx_buf))) {
1144 dd->read_len = readlen;
1145 dd->write_len = writelen;
1146 }
1147 }
1148 } else if (num_xfrs > 1)
1149 dd->multi_xfr = 1;
1150}
1151
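/*
 * Groups consecutive transfers that share the same cs_change setting so they
 * can be handled as one combined transaction; updates cur_msg_len to the
 * combined length and returns the number of transfers grouped.
 */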
1152static inline int combine_transfers(struct msm_spi *dd)
1153{
1154 struct spi_transfer *t = dd->cur_transfer;
1155 struct spi_transfer *nxt;
1156 int xfrs_grped = 1;
1157
1158 dd->cur_msg_len = dd->cur_transfer->len;
1159 while (t->transfer_list.next != &dd->cur_msg->transfers) {
1160 nxt = list_entry(t->transfer_list.next,
1161 struct spi_transfer,
1162 transfer_list);
1163 if (t->cs_change != nxt->cs_change)
1164 return xfrs_grped;
1165 dd->cur_msg_len += nxt->len;
1166 xfrs_grped++;
1167 t = nxt;
1168 }
1169 return xfrs_grped;
1170}
1171
Harini Jayaraman093938a2012-04-20 15:33:23 -06001172static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
1173{
1174 u32 spi_ioc;
1175 u32 spi_ioc_orig;
1176
1177 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1178 spi_ioc_orig = spi_ioc;
1179 if (set_flag)
1180 spi_ioc |= SPI_IO_C_FORCE_CS;
1181 else
1182 spi_ioc &= ~SPI_IO_C_FORCE_CS;
1183
1184 if (spi_ioc != spi_ioc_orig)
1185 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1186}
1187
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001188static void msm_spi_process_message(struct msm_spi *dd)
1189{
1190 int xfrs_grped = 0;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001191 int cs_num;
1192 int rc;
1193
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001194 dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001195 cs_num = dd->cur_msg->spi->chip_select;
1196 if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
1197 (!(dd->cs_gpios[cs_num].valid)) &&
1198 (dd->cs_gpios[cs_num].gpio_num >= 0)) {
1199 rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
1200 spi_cs_rsrcs[cs_num]);
1201 if (rc) {
1202 dev_err(dd->dev, "gpio_request for pin %d failed with "
1203 "error %d\n", dd->cs_gpios[cs_num].gpio_num,
1204 rc);
1205 return;
1206 }
1207 dd->cs_gpios[cs_num].valid = 1;
1208 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001209
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001210 if (dd->qup_ver) {
Harini Jayaraman093938a2012-04-20 15:33:23 -06001211 write_force_cs(dd, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001212 list_for_each_entry(dd->cur_transfer,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001213 &dd->cur_msg->transfers,
1214 transfer_list) {
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001215 struct spi_transfer *t = dd->cur_transfer;
1216 struct spi_transfer *nxt;
1217
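			/* Hold the chip select forced across adjacent
			 * transfers that share the same cs_change setting;
			 * otherwise release the force so CS can toggle. */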
1218 if (t->transfer_list.next != &dd->cur_msg->transfers) {
1219 nxt = list_entry(t->transfer_list.next,
1220 struct spi_transfer,
1221 transfer_list);
1222
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001223 if (t->cs_change == nxt->cs_change)
Harini Jayaraman093938a2012-04-20 15:33:23 -06001224 write_force_cs(dd, 1);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001225 else
Harini Jayaraman093938a2012-04-20 15:33:23 -06001226 write_force_cs(dd, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001227 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001228
1229 dd->cur_msg_len = dd->cur_transfer->len;
1230 msm_spi_process_transfer(dd);
1231 }
1232 } else {
1233 dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
1234 struct spi_transfer,
1235 transfer_list);
1236 get_transfer_length(dd);
1237 if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
1238 /*
1239 * Handling of multi-transfers.
1240 * FIFO mode is used by default
1241 */
1242 list_for_each_entry(dd->cur_transfer,
1243 &dd->cur_msg->transfers,
1244 transfer_list) {
1245 if (!dd->cur_transfer->len)
1246 goto error;
1247 if (xfrs_grped) {
1248 xfrs_grped--;
1249 continue;
1250 } else {
1251 dd->read_len = dd->write_len = 0;
1252 xfrs_grped = combine_transfers(dd);
1253 }
1254
1255 dd->cur_tx_transfer = dd->cur_transfer;
1256 dd->cur_rx_transfer = dd->cur_transfer;
1257 msm_spi_process_transfer(dd);
1258 xfrs_grped--;
1259 }
1260 } else {
1261 /* Handling of a single transfer or
1262 * WR-WR or WR-RD transfers
1263 */
1264 if ((!dd->cur_msg->is_dma_mapped) &&
1265 (msm_use_dm(dd, dd->cur_transfer,
1266 dd->cur_transfer->bits_per_word))) {
1267 /* Mapping of DMA buffers */
1268 int ret = msm_spi_map_dma_buffers(dd);
1269 if (ret < 0) {
1270 dd->cur_msg->status = ret;
1271 goto error;
1272 }
1273 }
1274
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001275 dd->cur_tx_transfer = dd->cur_transfer;
1276 dd->cur_rx_transfer = dd->cur_transfer;
1277 msm_spi_process_transfer(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001278 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001279 }
Harini Jayaramane4c06192011-09-28 16:26:39 -06001280
1281 return;
1282
1283error:
1284 if (dd->cs_gpios[cs_num].valid) {
1285 gpio_free(dd->cs_gpios[cs_num].gpio_num);
1286 dd->cs_gpios[cs_num].valid = 0;
1287 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001288}
1289
1290/* workqueue - pull messages from queue & process */
1291static void msm_spi_workq(struct work_struct *work)
1292{
1293 struct msm_spi *dd =
1294 container_of(work, struct msm_spi, work_data);
1295 unsigned long flags;
1296 u32 status_error = 0;
Alok Chauhanb5f53792012-08-22 19:54:45 +05301297 int rc = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001298
1299 mutex_lock(&dd->core_lock);
1300
1301 /* Don't allow power collapse until we release mutex */
1302 if (pm_qos_request_active(&qos_req_list))
1303 pm_qos_update_request(&qos_req_list,
1304 dd->pm_lat);
1305 if (dd->use_rlock)
1306 remote_mutex_lock(&dd->r_lock);
1307
Alok Chauhanb5f53792012-08-22 19:54:45 +05301308 /* Configure the spi clk, miso, mosi and cs gpio */
1309 if (dd->pdata->gpio_config) {
1310 rc = dd->pdata->gpio_config();
1311 if (rc) {
1312 dev_err(dd->dev,
1313 "%s: error configuring GPIOs\n",
1314 __func__);
1315 status_error = 1;
1316 }
1317 }
1318
1319 rc = msm_spi_request_gpios(dd);
1320 if (rc)
1321 status_error = 1;
1322
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001323 clk_prepare_enable(dd->clk);
1324 clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001325 msm_spi_enable_irqs(dd);
1326
1327 if (!msm_spi_is_valid_state(dd)) {
1328 dev_err(dd->dev, "%s: SPI operational state not valid\n",
1329 __func__);
1330 status_error = 1;
1331 }
1332
1333 spin_lock_irqsave(&dd->queue_lock, flags);
1334 while (!list_empty(&dd->queue)) {
1335 dd->cur_msg = list_entry(dd->queue.next,
1336 struct spi_message, queue);
1337 list_del_init(&dd->cur_msg->queue);
1338 spin_unlock_irqrestore(&dd->queue_lock, flags);
1339 if (status_error)
1340 dd->cur_msg->status = -EIO;
1341 else
1342 msm_spi_process_message(dd);
1343 if (dd->cur_msg->complete)
1344 dd->cur_msg->complete(dd->cur_msg->context);
1345 spin_lock_irqsave(&dd->queue_lock, flags);
1346 }
1347 dd->transfer_pending = 0;
1348 spin_unlock_irqrestore(&dd->queue_lock, flags);
1349
1350 msm_spi_disable_irqs(dd);
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001351 clk_disable_unprepare(dd->clk);
1352 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001353
Alok Chauhanb5f53792012-08-22 19:54:45 +05301354 /* Free the spi clk, miso, mosi, cs gpio */
1355 if (!rc && dd->pdata && dd->pdata->gpio_release)
1356 dd->pdata->gpio_release();
1357 if (!rc)
1358 msm_spi_free_gpios(dd);
1359
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001360 if (dd->use_rlock)
1361 remote_mutex_unlock(&dd->r_lock);
1362
1363 if (pm_qos_request_active(&qos_req_list))
1364 pm_qos_update_request(&qos_req_list,
1365 PM_QOS_DEFAULT_VALUE);
1366
1367 mutex_unlock(&dd->core_lock);
1368 /* If needed, this can be done after the current message is complete,
1369 and work can be continued upon resume. No motivation for now. */
1370 if (dd->suspended)
1371 wake_up_interruptible(&dd->continue_suspend);
1372}
1373
1374static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
1375{
1376 struct msm_spi *dd;
1377 unsigned long flags;
1378 struct spi_transfer *tr;
1379
1380 dd = spi_master_get_devdata(spi->master);
1381 if (dd->suspended)
1382 return -EBUSY;
1383
1384 if (list_empty(&msg->transfers) || !msg->complete)
1385 return -EINVAL;
1386
1387 list_for_each_entry(tr, &msg->transfers, transfer_list) {
1388 /* Check message parameters */
1389 if (tr->speed_hz > dd->pdata->max_clock_speed ||
1390 (tr->bits_per_word &&
1391 (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
1392 (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
  1393			dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw, "
  1394				"tx=%p, rx=%p\n",
1395 tr->speed_hz, tr->bits_per_word,
1396 tr->tx_buf, tr->rx_buf);
1397 return -EINVAL;
1398 }
1399 }
1400
1401 spin_lock_irqsave(&dd->queue_lock, flags);
1402 if (dd->suspended) {
1403 spin_unlock_irqrestore(&dd->queue_lock, flags);
1404 return -EBUSY;
1405 }
1406 dd->transfer_pending = 1;
1407 list_add_tail(&msg->queue, &dd->queue);
1408 spin_unlock_irqrestore(&dd->queue_lock, flags);
1409 queue_work(dd->workqueue, &dd->work_data);
1410 return 0;
1411}
1412
1413static int msm_spi_setup(struct spi_device *spi)
1414{
1415 struct msm_spi *dd;
1416 int rc = 0;
1417 u32 spi_ioc;
1418 u32 spi_config;
1419 u32 mask;
1420
1421 if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
1422 dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
1423 __func__, spi->bits_per_word);
1424 rc = -EINVAL;
1425 }
1426 if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
1427 dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
1428 __func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
1429 rc = -EINVAL;
1430 }
1431
1432 if (rc)
1433 goto err_setup_exit;
1434
1435 dd = spi_master_get_devdata(spi->master);
1436
1437 mutex_lock(&dd->core_lock);
1438 if (dd->suspended) {
1439 mutex_unlock(&dd->core_lock);
1440 return -EBUSY;
1441 }
1442
1443 if (dd->use_rlock)
1444 remote_mutex_lock(&dd->r_lock);
1445
Alok Chauhanb5f53792012-08-22 19:54:45 +05301446 /* Configure the spi clk, miso, mosi, cs gpio */
1447 if (dd->pdata->gpio_config) {
1448 rc = dd->pdata->gpio_config();
1449 if (rc) {
1450 dev_err(&spi->dev,
1451 "%s: error configuring GPIOs\n",
1452 __func__);
1453 rc = -ENXIO;
1454 goto err_setup_gpio;
1455 }
1456 }
1457
1458 rc = msm_spi_request_gpios(dd);
1459 if (rc) {
1460 rc = -ENXIO;
1461 goto err_setup_gpio;
1462 }
1463
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001464 clk_prepare_enable(dd->clk);
1465 clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001466
1467 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1468 mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
1469 if (spi->mode & SPI_CS_HIGH)
1470 spi_ioc |= mask;
1471 else
1472 spi_ioc &= ~mask;
1473 if (spi->mode & SPI_CPOL)
1474 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1475 else
1476 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1477
1478 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1479
1480 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
1481 if (spi->mode & SPI_LOOP)
1482 spi_config |= SPI_CFG_LOOPBACK;
1483 else
1484 spi_config &= ~SPI_CFG_LOOPBACK;
1485 if (spi->mode & SPI_CPHA)
1486 spi_config &= ~SPI_CFG_INPUT_FIRST;
1487 else
1488 spi_config |= SPI_CFG_INPUT_FIRST;
1489 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
1490
1491 /* Ensure previous write completed before disabling the clocks */
1492 mb();
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001493 clk_disable_unprepare(dd->clk);
1494 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001495
Alok Chauhanb5f53792012-08-22 19:54:45 +05301496 /* Free the spi clk, miso, mosi, cs gpio */
1497 if (dd->pdata && dd->pdata->gpio_release)
1498 dd->pdata->gpio_release();
1499 msm_spi_free_gpios(dd);
1500
1501err_setup_gpio:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001502 if (dd->use_rlock)
1503 remote_mutex_unlock(&dd->r_lock);
1504 mutex_unlock(&dd->core_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001505err_setup_exit:
1506 return rc;
1507}
1508
1509#ifdef CONFIG_DEBUG_FS
1510static int debugfs_iomem_x32_set(void *data, u64 val)
1511{
1512 writel_relaxed(val, data);
1513 /* Ensure the previous write completed. */
1514 mb();
1515 return 0;
1516}
1517
1518static int debugfs_iomem_x32_get(void *data, u64 *val)
1519{
1520 *val = readl_relaxed(data);
1521 /* Ensure the previous read completed. */
1522 mb();
1523 return 0;
1524}
1525
1526DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
1527 debugfs_iomem_x32_set, "0x%08llx\n");
1528
1529static void spi_debugfs_init(struct msm_spi *dd)
1530{
1531 dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
1532 if (dd->dent_spi) {
1533 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001534
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001535 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
1536 dd->debugfs_spi_regs[i] =
1537 debugfs_create_file(
1538 debugfs_spi_regs[i].name,
1539 debugfs_spi_regs[i].mode,
1540 dd->dent_spi,
1541 dd->base + debugfs_spi_regs[i].offset,
1542 &fops_iomem_x32);
1543 }
1544 }
1545}
1546
1547static void spi_debugfs_exit(struct msm_spi *dd)
1548{
1549 if (dd->dent_spi) {
1550 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001551
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001552 debugfs_remove_recursive(dd->dent_spi);
1553 dd->dent_spi = NULL;
1554 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
1555 dd->debugfs_spi_regs[i] = NULL;
1556 }
1557}
1558#else
1559static void spi_debugfs_init(struct msm_spi *dd) {}
1560static void spi_debugfs_exit(struct msm_spi *dd) {}
1561#endif
1562
1563/* ===Device attributes begin=== */
1564static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
1565 char *buf)
1566{
1567 struct spi_master *master = dev_get_drvdata(dev);
1568 struct msm_spi *dd = spi_master_get_devdata(master);
1569
1570 return snprintf(buf, PAGE_SIZE,
1571 "Device %s\n"
1572 "rx fifo_size = %d spi words\n"
1573 "tx fifo_size = %d spi words\n"
1574 "use_dma ? %s\n"
1575 "rx block size = %d bytes\n"
1576 "tx block size = %d bytes\n"
1577 "burst size = %d bytes\n"
1578 "DMA configuration:\n"
1579 "tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
1580 "--statistics--\n"
1581 "Rx isrs = %d\n"
1582 "Tx isrs = %d\n"
1583 "DMA error = %d\n"
1584 "--debug--\n"
1585 "NA yet\n",
1586 dev_name(dev),
1587 dd->input_fifo_size,
1588 dd->output_fifo_size,
1589 dd->use_dma ? "yes" : "no",
1590 dd->input_block_size,
1591 dd->output_block_size,
1592 dd->burst_size,
1593 dd->tx_dma_chan,
1594 dd->rx_dma_chan,
1595 dd->tx_dma_crci,
1596 dd->rx_dma_crci,
1597 dd->stat_rx + dd->stat_dmov_rx,
1598 dd->stat_tx + dd->stat_dmov_tx,
1599 dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
1600 );
1601}
1602
1603/* Reset statistics on write */
1604static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
1605 const char *buf, size_t count)
1606{
1607 struct msm_spi *dd = dev_get_drvdata(dev);
1608 dd->stat_rx = 0;
1609 dd->stat_tx = 0;
1610 dd->stat_dmov_rx = 0;
1611 dd->stat_dmov_tx = 0;
1612 dd->stat_dmov_rx_err = 0;
1613 dd->stat_dmov_tx_err = 0;
1614 return count;
1615}
1616
1617static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
1618
1619static struct attribute *dev_attrs[] = {
1620 &dev_attr_stats.attr,
1621 NULL,
1622};
1623
1624static struct attribute_group dev_attr_grp = {
1625 .attrs = dev_attrs,
1626};
1627/* ===Device attributes end=== */
1628
1629/**
1630 * spi_dmov_tx_complete_func - DataMover tx completion callback
1631 *
1632 * Executed in IRQ context (Data Mover's IRQ) DataMover's
1633 * spinlock @msm_dmov_lock held.
1634 */
1635static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
1636 unsigned int result,
1637 struct msm_dmov_errdata *err)
1638{
1639 struct msm_spi *dd;
1640
1641 if (!(result & DMOV_RSLT_VALID)) {
  1642		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p\n", result, cmd);
1643 return;
1644 }
1645 /* restore original context */
1646 dd = container_of(cmd, struct msm_spi, tx_hdr);
Kiran Gunda54eb06e2012-05-18 15:17:06 +05301647 if (result & DMOV_RSLT_DONE) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001648 dd->stat_dmov_tx++;
Kiran Gunda54eb06e2012-05-18 15:17:06 +05301649 if ((atomic_inc_return(&dd->tx_irq_called) == 1))
1650 return;
1651 complete(&dd->transfer_complete);
1652 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001653 /* Error or flush */
1654 if (result & DMOV_RSLT_ERROR) {
1655 dev_err(dd->dev, "DMA error (0x%08x)\n", result);
1656 dd->stat_dmov_tx_err++;
1657 }
1658 if (result & DMOV_RSLT_FLUSH) {
1659 /*
1660 * Flushing normally happens in process of
1661 * removing, when we are waiting for outstanding
1662 * DMA commands to be flushed.
1663 */
1664 dev_info(dd->dev,
1665 "DMA channel flushed (0x%08x)\n", result);
1666 }
1667 if (err)
1668 dev_err(dd->dev,
1669 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
1670 err->flush[0], err->flush[1], err->flush[2],
1671 err->flush[3], err->flush[4], err->flush[5]);
1672 dd->cur_msg->status = -EIO;
1673 complete(&dd->transfer_complete);
1674 }
1675}
1676
1677/**
1678 * spi_dmov_rx_complete_func - DataMover rx completion callback
1679 *
1680 * Executed in IRQ context (Data Mover's IRQ)
1681 * DataMover's spinlock @msm_dmov_lock held.
1682 */
1683static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
1684 unsigned int result,
1685 struct msm_dmov_errdata *err)
1686{
1687 struct msm_spi *dd;
1688
1689 if (!(result & DMOV_RSLT_VALID)) {
1690 pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)",
1691 result, cmd);
1692 return;
1693 }
1694 /* restore original context */
1695 dd = container_of(cmd, struct msm_spi, rx_hdr);
1696 if (result & DMOV_RSLT_DONE) {
1697 dd->stat_dmov_rx++;
1698 if (atomic_inc_return(&dd->rx_irq_called) == 1)
1699 return;
1700 complete(&dd->transfer_complete);
1701 } else {
1702 /** Error or flush */
1703 if (result & DMOV_RSLT_ERROR) {
1704 dev_err(dd->dev, "DMA error(0x%08x)\n", result);
1705 dd->stat_dmov_rx_err++;
1706 }
1707 if (result & DMOV_RSLT_FLUSH) {
1708 dev_info(dd->dev,
1709 "DMA channel flushed(0x%08x)\n", result);
1710 }
1711 if (err)
1712 dev_err(dd->dev,
1713 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
1714 err->flush[0], err->flush[1], err->flush[2],
1715 err->flush[3], err->flush[4], err->flush[5]);
1716 dd->cur_msg->status = -EIO;
1717 complete(&dd->transfer_complete);
1718 }
1719}
1720
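/*
 * Size of the single coherent allocation used for DMA bookkeeping: one dmov
 * command struct plus one burst-sized padding buffer per direction (TX and
 * RX), each rounded up for alignment, hence the factor of two.
 */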
1721static inline u32 get_chunk_size(struct msm_spi *dd)
1722{
1723 u32 cache_line = dma_get_cache_alignment();
1724
1725 return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
1726 roundup(dd->burst_size, cache_line))*2;
1727}
1728
1729static void msm_spi_teardown_dma(struct msm_spi *dd)
1730{
1731 int limit = 0;
1732
1733 if (!dd->use_dma)
1734 return;
1735
1736 while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001737 msm_dmov_flush(dd->tx_dma_chan, 1);
1738 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001739 msleep(10);
1740 }
1741
1742 dma_free_coherent(NULL, get_chunk_size(dd), dd->tx_dmov_cmd,
1743 dd->tx_dmov_cmd_dma);
1744 dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
1745 dd->tx_padding = dd->rx_padding = NULL;
1746}
1747
1748static __init int msm_spi_init_dma(struct msm_spi *dd)
1749{
1750 dmov_box *box;
1751 u32 cache_line = dma_get_cache_alignment();
1752
  1753	/* Allocate everything as one chunk, since the total is smaller than a page */
1754
  1755	/* We pass a NULL device, since using one would require a coherent_dma_mask
  1756	   in the device definition; we're okay with using the system pool */
1757 dd->tx_dmov_cmd = dma_alloc_coherent(NULL, get_chunk_size(dd),
1758 &dd->tx_dmov_cmd_dma, GFP_KERNEL);
1759 if (dd->tx_dmov_cmd == NULL)
1760 return -ENOMEM;
1761
  1762	/* DMA addresses should be 64-bit aligned */
	dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
			  ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
	dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
				    sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);

	/* Buffers should be aligned to cache line */
	dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
	dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
				   sizeof(struct spi_dmov_cmd), cache_line);
	dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding + dd->burst_size),
				     cache_line);
	dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->burst_size,
				   cache_line);

	/* Setup DM commands */
	box = &(dd->rx_dmov_cmd->box);
	box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
	box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
	dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
			    DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
			    offsetof(struct spi_dmov_cmd, cmd_ptr));
	dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;

	box = &(dd->tx_dmov_cmd->box);
	box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
	box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
	dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
			    DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
			    offsetof(struct spi_dmov_cmd, cmd_ptr));
	dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;

	dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
					  CMD_DST_CRCI(dd->tx_dma_crci);
	dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
					  SPI_OUTPUT_FIFO;
	dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
					  CMD_SRC_CRCI(dd->rx_dma_crci);
	dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
					  SPI_INPUT_FIFO;

	/* Clear remaining activities on channel */
	msm_dmov_flush(dd->tx_dma_chan, 1);
	msm_dmov_flush(dd->rx_dma_chan, 1);

	return 0;
}

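/*
 * Properties consumed from the device tree by this driver (illustrative
 * fragment only -- the unit address, "reg" value and GPIO phandle below are
 * placeholders, not taken from any real board file):
 *
 *	spi@f9923000 {
 *		compatible = "qcom,spi-qup-v2";
 *		reg = <0xf9923000 0x1000>;
 *		cell-index = <0>;
 *		spi-max-frequency = <19200000>;
 *		infinite_mode = <0>;
 *		cs-gpios = <&msmgpio 2 0>;
 *	};
 */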
struct msm_spi_platform_data *msm_spi_dt_to_pdata(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct msm_spi_platform_data *pdata;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		pr_err("Unable to allocate platform data\n");
		return NULL;
	}

	of_property_read_u32(node, "spi-max-frequency",
			     &pdata->max_clock_speed);
	of_property_read_u32(node, "infinite_mode",
			     &pdata->infinite_mode);

	return pdata;
}

static int __init msm_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct msm_spi *dd;
	struct resource *resource;
	int rc = -ENXIO;
	int locked = 0;
	int i = 0;
	int clk_enabled = 0;
	int pclk_enabled = 0;
	struct msm_spi_platform_data *pdata;
	enum of_gpio_flags flags;

	master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
	if (!master) {
		rc = -ENOMEM;
		dev_err(&pdev->dev, "master allocation failed\n");
		goto err_probe_exit;
	}

	master->bus_num = pdev->id;
	master->mode_bits = SPI_SUPPORTED_MODES;
	master->num_chipselect = SPI_NUM_CHIPSELECTS;
	master->setup = msm_spi_setup;
	master->transfer = msm_spi_transfer;
	platform_set_drvdata(pdev, master);
	dd = spi_master_get_devdata(master);

	if (pdev->dev.of_node) {
		dd->qup_ver = SPI_QUP_VERSION_BFAM;
		master->dev.of_node = pdev->dev.of_node;
		pdata = msm_spi_dt_to_pdata(pdev);
		if (!pdata) {
			rc = -ENOMEM;
			goto err_probe_exit;
		}

		rc = of_property_read_u32(pdev->dev.of_node,
					  "cell-index", &pdev->id);
		if (rc)
			dev_warn(&pdev->dev,
				 "using default bus_num %d\n", pdev->id);
		else
			master->bus_num = pdev->id;

		for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
			dd->spi_gpios[i] = of_get_gpio_flags(pdev->dev.of_node,
							     i, &flags);
		}

		for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
			dd->cs_gpios[i].gpio_num = of_get_named_gpio_flags(
						pdev->dev.of_node, "cs-gpios",
						i, &flags);
			dd->cs_gpios[i].valid = 0;
		}
	} else {
		pdata = pdev->dev.platform_data;
		dd->qup_ver = SPI_QUP_VERSION_NONE;

		for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
			resource = platform_get_resource(pdev, IORESOURCE_IO,
							 i);
			dd->spi_gpios[i] = resource ? resource->start : -1;
		}

		for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
			resource = platform_get_resource(pdev, IORESOURCE_IO,
						i + ARRAY_SIZE(spi_rsrcs));
			dd->cs_gpios[i].gpio_num = resource ?
							resource->start : -1;
			dd->cs_gpios[i].valid = 0;
		}
	}

	dd->pdata = pdata;
	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!resource) {
		rc = -ENXIO;
		goto err_probe_res;
	}

	dd->mem_phys_addr = resource->start;
	dd->mem_size = resource_size(resource);

	if (pdata) {
		if (pdata->dma_config) {
			rc = pdata->dma_config();
			if (rc) {
				dev_warn(&pdev->dev,
					"%s: DM mode not supported\n",
					__func__);
				dd->use_dma = 0;
				goto skip_dma_resources;
			}
		}
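		/*
		 * The first IORESOURCE_DMA entry carries the rx/tx DataMover
		 * channel numbers in its start/end fields; the second entry
		 * carries the matching rx/tx CRCI numbers.
		 */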
		resource = platform_get_resource(pdev, IORESOURCE_DMA, 0);
		if (resource) {
			dd->rx_dma_chan = resource->start;
			dd->tx_dma_chan = resource->end;
			resource = platform_get_resource(pdev, IORESOURCE_DMA,
							 1);
			if (!resource) {
				rc = -ENXIO;
				goto err_probe_res;
			}

			dd->rx_dma_crci = resource->start;
			dd->tx_dma_crci = resource->end;
			dd->use_dma = 1;
			master->dma_alignment = dma_get_cache_alignment();
		}
	}

skip_dma_resources:

	spin_lock_init(&dd->queue_lock);
	mutex_init(&dd->core_lock);
	INIT_LIST_HEAD(&dd->queue);
	INIT_WORK(&dd->work_data, msm_spi_workq);
	init_waitqueue_head(&dd->continue_suspend);
	dd->workqueue = create_singlethread_workqueue(
			dev_name(master->dev.parent));
	if (!dd->workqueue) {
		rc = -ENOMEM;
		goto err_probe_workq;
	}

	if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
				     dd->mem_size, SPI_DRV_NAME)) {
		rc = -ENXIO;
		goto err_probe_reqmem;
	}

	dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
	if (!dd->base) {
		rc = -ENOMEM;
		goto err_probe_reqmem;
	}

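	/*
	 * When the SPI core is shared with another execution environment the
	 * platform data names a remote spinlock (rsl_id); accesses are then
	 * arbitrated through a remote mutex, and a CPU/DMA-latency pm_qos
	 * request is registered (presumably tightened to pdata->pm_lat around
	 * transfers elsewhere in the driver).
	 */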
	if (pdata && pdata->rsl_id) {
		struct remote_mutex_id rmid;
		rmid.r_spinlock_id = pdata->rsl_id;
		rmid.delay_us = SPI_TRYLOCK_DELAY;

		rc = remote_mutex_init(&dd->r_lock, &rmid);
		if (rc) {
			dev_err(&pdev->dev,
				"%s: unable to init remote_mutex (%s), (rc=%d)\n",
				__func__, rmid.r_spinlock_id, rc);
			goto err_probe_rlock_init;
		}

		dd->use_rlock = 1;
		dd->pm_lat = pdata->pm_lat;
		pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
				   PM_QOS_DEFAULT_VALUE);
	}

	mutex_lock(&dd->core_lock);
	if (dd->use_rlock)
		remote_mutex_lock(&dd->r_lock);

	locked = 1;
	dd->dev = &pdev->dev;
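	/* core_clk feeds the serial engine; iface_clk clocks the register
	 * interface -- both must be enabled before touching the hardware. */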
	dd->clk = clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(dd->clk)) {
		dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
		rc = PTR_ERR(dd->clk);
		goto err_probe_clk_get;
	}

	dd->pclk = clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(dd->pclk)) {
		dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
		rc = PTR_ERR(dd->pclk);
		goto err_probe_pclk_get;
	}

	if (pdata && pdata->max_clock_speed)
		msm_spi_clock_set(dd, dd->pdata->max_clock_speed);

	rc = clk_prepare_enable(dd->clk);
	if (rc) {
		dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
			__func__);
		goto err_probe_clk_enable;
	}

	clk_enabled = 1;
	rc = clk_prepare_enable(dd->pclk);
	if (rc) {
		dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
			__func__);
		goto err_probe_pclk_enable;
	}

	pclk_enabled = 1;
	rc = msm_spi_configure_gsbi(dd, pdev);
	if (rc)
		goto err_probe_gsbi;

	msm_spi_calculate_fifo_size(dd);
	if (dd->use_dma) {
		rc = msm_spi_init_dma(dd);
		if (rc)
			goto err_probe_dma;
	}

	msm_spi_register_init(dd);
	/*
	 * The SPI core generates a bogus input overrun error on some targets,
	 * when a transition from run to reset state occurs and if the FIFO has
	 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
	 * bit.
	 */
	msm_spi_enable_error_flags(dd);

	writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
	rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	if (rc)
		goto err_probe_state;

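	/*
	 * Hardware setup is done; gate both clocks again here.  They are
	 * expected to be re-enabled around actual transfers elsewhere in the
	 * driver.
	 */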
	clk_disable_unprepare(dd->clk);
	clk_disable_unprepare(dd->pclk);
	clk_enabled = 0;
	pclk_enabled = 0;

	dd->suspended = 0;
	dd->transfer_pending = 0;
	dd->multi_xfr = 0;
	dd->mode = SPI_MODE_NONE;

	rc = msm_spi_request_irq(dd, pdev, master);
	if (rc)
		goto err_probe_irq;

	msm_spi_disable_irqs(dd);
	if (dd->use_rlock)
		remote_mutex_unlock(&dd->r_lock);

	mutex_unlock(&dd->core_lock);
	locked = 0;

	rc = spi_register_master(master);
	if (rc)
		goto err_probe_reg_master;

	rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
	if (rc) {
		dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
		goto err_attrs;
	}

	spi_debugfs_init(dd);
	return 0;

err_attrs:
	spi_unregister_master(master);
err_probe_reg_master:
err_probe_irq:
err_probe_state:
	msm_spi_teardown_dma(dd);
err_probe_dma:
err_probe_gsbi:
	if (pclk_enabled)
		clk_disable_unprepare(dd->pclk);
err_probe_pclk_enable:
	if (clk_enabled)
		clk_disable_unprepare(dd->clk);
err_probe_clk_enable:
	clk_put(dd->pclk);
err_probe_pclk_get:
	clk_put(dd->clk);
err_probe_clk_get:
	if (locked) {
		if (dd->use_rlock)
			remote_mutex_unlock(&dd->r_lock);

		mutex_unlock(&dd->core_lock);
	}
err_probe_rlock_init:
err_probe_reqmem:
	destroy_workqueue(dd->workqueue);
err_probe_workq:
err_probe_res:
	spi_master_put(master);
err_probe_exit:
	return rc;
}

#ifdef CONFIG_PM
static int msm_spi_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct msm_spi *dd;
	unsigned long flags;

	if (!master)
		goto suspend_exit;
	dd = spi_master_get_devdata(master);
	if (!dd)
		goto suspend_exit;

	/* Make sure nothing is added to the queue while we're suspending */
	spin_lock_irqsave(&dd->queue_lock, flags);
	dd->suspended = 1;
	spin_unlock_irqrestore(&dd->queue_lock, flags);

	/* Wait for pending transactions to end */
	wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending);

suspend_exit:
	return 0;
}

static int msm_spi_resume(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct msm_spi *dd;

	if (!master)
		goto resume_exit;
	dd = spi_master_get_devdata(master);
	if (!dd)
		goto resume_exit;

	dd->suspended = 0;
resume_exit:
	return 0;
}
#else
#define msm_spi_suspend NULL
#define msm_spi_resume NULL
#endif /* CONFIG_PM */

static int __devexit msm_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct msm_spi *dd = spi_master_get_devdata(master);

	pm_qos_remove_request(&qos_req_list);
	spi_debugfs_exit(dd);
	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);

	msm_spi_teardown_dma(dd);

	clk_put(dd->clk);
	clk_put(dd->pclk);
	destroy_workqueue(dd->workqueue);
	platform_set_drvdata(pdev, NULL);
	spi_unregister_master(master);
	spi_master_put(master);

	return 0;
}

static struct of_device_id msm_spi_dt_match[] = {
	{
		.compatible = "qcom,spi-qup-v2",
	},
	{}
};

static struct platform_driver msm_spi_driver = {
	.driver = {
		.name = SPI_DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = msm_spi_dt_match,
	},
	.suspend = msm_spi_suspend,
	.resume = msm_spi_resume,
	.remove = __devexit_p(msm_spi_remove),
};

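/*
 * platform_driver_probe() only binds devices that are already registered at
 * this point and does not allow later binding, which is why msm_spi_probe()
 * can safely live in the __init section.
 */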
static int __init msm_spi_init(void)
{
	return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
}
module_init(msm_spi_init);

static void __exit msm_spi_exit(void)
{
	platform_driver_unregister(&msm_spi_driver);
}
module_exit(msm_spi_exit);

MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.4");
MODULE_ALIAS("platform:"SPI_DRV_NAME);