Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13/*
14 * SPI driver for Qualcomm MSM platforms
15 *
16 */
17#include <linux/version.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/spinlock.h>
21#include <linux/list.h>
22#include <linux/irq.h>
23#include <linux/platform_device.h>
24#include <linux/spi/spi.h>
25#include <linux/interrupt.h>
26#include <linux/err.h>
27#include <linux/clk.h>
28#include <linux/delay.h>
29#include <linux/workqueue.h>
30#include <linux/io.h>
31#include <linux/debugfs.h>
32#include <mach/msm_spi.h>
33#include <linux/dma-mapping.h>
34#include <linux/sched.h>
35#include <mach/dma.h>
36#include <asm/atomic.h>
37#include <linux/mutex.h>
38#include <linux/gpio.h>
39#include <linux/remote_spinlock.h>
40#include <linux/pm_qos_params.h>
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070041#include <linux/of.h>
Sathish Ambleycd06bf32012-04-09 11:59:43 -070042#include <linux/of_gpio.h>
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070043#include "spi_qsd.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070044
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070045static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
46 struct platform_device *pdev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047{
48 struct resource *resource;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070049 unsigned long gsbi_mem_phys_addr;
50 size_t gsbi_mem_size;
51 void __iomem *gsbi_base;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070052
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070053 resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070054 if (!resource)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070055 return 0;
56
57 gsbi_mem_phys_addr = resource->start;
58 gsbi_mem_size = resource_size(resource);
59 if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
60 gsbi_mem_size, SPI_DRV_NAME))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070061 return -ENXIO;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070062
63 gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
64 gsbi_mem_size);
65 if (!gsbi_base)
66 return -ENXIO;
67
68 /* Set GSBI to SPI mode */
69 writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070070
71 return 0;
72}
73
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070074static inline void msm_spi_register_init(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070075{
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070076 writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
77 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
78 writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
79 writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
80 writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
81 if (dd->qup_ver)
82 writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070083}
84
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070085static inline int msm_spi_request_gpios(struct msm_spi *dd)
86{
87 int i;
88 int result = 0;
89
90 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
91 if (dd->spi_gpios[i] >= 0) {
92 result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
93 if (result) {
Harini Jayaramane4c06192011-09-28 16:26:39 -060094 dev_err(dd->dev, "%s: gpio_request for pin %d "
95 "failed with error %d\n", __func__,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070096 dd->spi_gpios[i], result);
97 goto error;
98 }
99 }
100 }
101 return 0;
102
103error:
104 for (; --i >= 0;) {
105 if (dd->spi_gpios[i] >= 0)
106 gpio_free(dd->spi_gpios[i]);
107 }
108 return result;
109}
110
111static inline void msm_spi_free_gpios(struct msm_spi *dd)
112{
113 int i;
114
115 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
116 if (dd->spi_gpios[i] >= 0)
117 gpio_free(dd->spi_gpios[i]);
118 }
Harini Jayaramane4c06192011-09-28 16:26:39 -0600119
120 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
121 if (dd->cs_gpios[i].valid) {
122 gpio_free(dd->cs_gpios[i].gpio_num);
123 dd->cs_gpios[i].valid = 0;
124 }
125 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700126}
127
128static void msm_spi_clock_set(struct msm_spi *dd, int speed)
129{
130 int rc;
131
132 rc = clk_set_rate(dd->clk, speed);
133 if (!rc)
134 dd->clock_speed = speed;
135}
136
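/*
 * Decode the block/mult fields read from SPI_IO_MODES into a FIFO size
 * (in 32-bit words) and a block size (in bytes).
 * Example: block = 1, mult = 2 -> words = 4, so *fifo_size = 32 words
 * and *block_size = 16 bytes.
 */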
137static int msm_spi_calculate_size(int *fifo_size,
138 int *block_size,
139 int block,
140 int mult)
141{
142 int words;
143
144 switch (block) {
145 case 0:
146 words = 1; /* 4 bytes */
147 break;
148 case 1:
149 words = 4; /* 16 bytes */
150 break;
151 case 2:
152 words = 8; /* 32 bytes */
153 break;
154 default:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700155 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700156 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700157
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700158 switch (mult) {
159 case 0:
160 *fifo_size = words * 2;
161 break;
162 case 1:
163 *fifo_size = words * 4;
164 break;
165 case 2:
166 *fifo_size = words * 8;
167 break;
168 case 3:
169 *fifo_size = words * 16;
170 break;
171 default:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700172 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700173 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700174
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700175 *block_size = words * sizeof(u32); /* in bytes */
176 return 0;
177}
178
179static void get_next_transfer(struct msm_spi *dd)
180{
181 struct spi_transfer *t = dd->cur_transfer;
182
183 if (t->transfer_list.next != &dd->cur_msg->transfers) {
184 dd->cur_transfer = list_entry(t->transfer_list.next,
185 struct spi_transfer,
186 transfer_list);
187 dd->write_buf = dd->cur_transfer->tx_buf;
188 dd->read_buf = dd->cur_transfer->rx_buf;
189 }
190}
191
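/*
 * Read SPI_IO_MODES and derive the input/output FIFO and block sizes.
 * Data mover (DM) mode is disabled when either block size is 4 bytes or
 * when the input and output block sizes differ; otherwise the DM burst
 * size is at least DM_BURST_SIZE.
 */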
192static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
193{
194 u32 spi_iom;
195 int block;
196 int mult;
197
198 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
199
200 block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
201 mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
202 if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
203 block, mult)) {
204 goto fifo_size_err;
205 }
206
207 block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
208 mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
209 if (msm_spi_calculate_size(&dd->output_fifo_size,
210 &dd->output_block_size, block, mult)) {
211 goto fifo_size_err;
212 }
213 /* DM mode is not available for this block size */
214 if (dd->input_block_size == 4 || dd->output_block_size == 4)
215 dd->use_dma = 0;
216
217 /* DM mode is currently unsupported for different block sizes */
218 if (dd->input_block_size != dd->output_block_size)
219 dd->use_dma = 0;
220
221 if (dd->use_dma)
222 dd->burst_size = max(dd->input_block_size, DM_BURST_SIZE);
223
224 return;
225
226fifo_size_err:
227 dd->use_dma = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700228 pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700229 return;
230}
231
232static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
233{
234 u32 data_in;
235 int i;
236 int shift;
237
238 data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
239 if (dd->read_buf) {
240 for (i = 0; (i < dd->bytes_per_word) &&
241 dd->rx_bytes_remaining; i++) {
242 /* The data format depends on bytes_per_word:
243 4 bytes: 0x12345678
244 3 bytes: 0x00123456
245 2 bytes: 0x00001234
246 1 byte : 0x00000012
247 */
248 shift = 8 * (dd->bytes_per_word - i - 1);
249 *dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
250 dd->rx_bytes_remaining--;
251 }
252 } else {
253 if (dd->rx_bytes_remaining >= dd->bytes_per_word)
254 dd->rx_bytes_remaining -= dd->bytes_per_word;
255 else
256 dd->rx_bytes_remaining = 0;
257 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700258
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700259 dd->read_xfr_cnt++;
260 if (dd->multi_xfr) {
261 if (!dd->rx_bytes_remaining)
262 dd->read_xfr_cnt = 0;
263 else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
264 dd->read_len) {
265 struct spi_transfer *t = dd->cur_rx_transfer;
266 if (t->transfer_list.next != &dd->cur_msg->transfers) {
267 t = list_entry(t->transfer_list.next,
268 struct spi_transfer,
269 transfer_list);
270 dd->read_buf = t->rx_buf;
271 dd->read_len = t->len;
272 dd->read_xfr_cnt = 0;
273 dd->cur_rx_transfer = t;
274 }
275 }
276 }
277}
278
279static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
280{
281 u32 spi_op = readl_relaxed(dd->base + SPI_STATE);
282
283 return spi_op & SPI_OP_STATE_VALID;
284}
285
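/*
 * Poll SPI_STATE until the VALID bit is set. The polling interval is
 * derived from the SPI clock speed (ten clock periods, bounded below by
 * SPI_DELAY_THRESHOLD) and the overall timeout scales with
 * SPI_DEFAULT_TIMEOUT; on timeout the current message is marked -EIO.
 */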
286static inline int msm_spi_wait_valid(struct msm_spi *dd)
287{
288 unsigned long delay = 0;
289 unsigned long timeout = 0;
290
291 if (dd->clock_speed == 0)
292 return -EINVAL;
293 /*
294 * Based on the SPI clock speed, sufficient time
295 * should be given for the SPI state transition
296 * to occur
297 */
298 delay = (10 * USEC_PER_SEC) / dd->clock_speed;
299 /*
300 * For small delay values, the default timeout would
301 * be one jiffy
302 */
303 if (delay < SPI_DELAY_THRESHOLD)
304 delay = SPI_DELAY_THRESHOLD;
Harini Jayaramanbcef7732011-09-01 12:12:58 -0600305
306 /* Adding one to round off to the nearest jiffy */
307 timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700308 while (!msm_spi_is_valid_state(dd)) {
309 if (time_after(jiffies, timeout)) {
Harini Jayaramanbcef7732011-09-01 12:12:58 -0600310 if (!msm_spi_is_valid_state(dd)) {
311 if (dd->cur_msg)
312 dd->cur_msg->status = -EIO;
 313 dev_err(dd->dev, "%s: SPI operational state "
 314 "not valid\n", __func__);
315 return -ETIMEDOUT;
316 } else
317 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700318 }
319 /*
320 * For smaller values of delay, context switch time
321 * would negate the usage of usleep
322 */
323 if (delay > 20)
324 usleep(delay);
325 else if (delay)
326 udelay(delay);
327 }
328 return 0;
329}
330
331static inline int msm_spi_set_state(struct msm_spi *dd,
332 enum msm_spi_state state)
333{
334 enum msm_spi_state cur_state;
335 if (msm_spi_wait_valid(dd))
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700336 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700337 cur_state = readl_relaxed(dd->base + SPI_STATE);
338 /* Per spec:
339 For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
340 if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
341 (state == SPI_OP_STATE_RESET)) {
342 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
343 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
344 } else {
345 writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
346 dd->base + SPI_STATE);
347 }
348 if (msm_spi_wait_valid(dd))
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700349 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700350
351 return 0;
352}
353
354static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n)
355{
356 *config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);
357
358 if (n != (*config & SPI_CFG_N))
359 *config = (*config & ~SPI_CFG_N) | n;
360
361 if ((dd->mode == SPI_DMOV_MODE) && (!dd->read_len)) {
362 if (dd->read_buf == NULL)
363 *config |= SPI_NO_INPUT;
364 if (dd->write_buf == NULL)
365 *config |= SPI_NO_OUTPUT;
366 }
367}
368
369static void msm_spi_set_config(struct msm_spi *dd, int bpw)
370{
371 u32 spi_config;
372
373 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
374
375 if (dd->cur_msg->spi->mode & SPI_CPHA)
376 spi_config &= ~SPI_CFG_INPUT_FIRST;
377 else
378 spi_config |= SPI_CFG_INPUT_FIRST;
379 if (dd->cur_msg->spi->mode & SPI_LOOP)
380 spi_config |= SPI_CFG_LOOPBACK;
381 else
382 spi_config &= ~SPI_CFG_LOOPBACK;
383 msm_spi_add_configs(dd, &spi_config, bpw-1);
384 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
385 msm_spi_set_qup_config(dd, bpw);
386}
387
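/*
 * Build the data mover command lists for the current chunk: full
 * burst_size rows are described by the box commands, and any unaligned
 * tail is bounced through the tx/rx padding buffers via the single_pad
 * commands. SPI_MX_OUTPUT_COUNT/SPI_MX_INPUT_COUNT are programmed so the
 * padding bytes are not actually shifted out on the wire.
 */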
388static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
389{
390 dmov_box *box;
391 int bytes_to_send, num_rows, bytes_sent;
392 u32 num_transfers;
393
394 atomic_set(&dd->rx_irq_called, 0);
395 if (dd->write_len && !dd->read_len) {
396 /* WR-WR transfer */
397 bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
398 dd->write_buf = dd->temp_buf;
399 } else {
400 bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
401 /* For WR-RD transfer, bytes_sent can be negative */
402 if (bytes_sent < 0)
403 bytes_sent = 0;
404 }
405
406 /* We'll send in chunks of SPI_MAX_LEN if larger */
407 bytes_to_send = dd->tx_bytes_remaining / SPI_MAX_LEN ?
408 SPI_MAX_LEN : dd->tx_bytes_remaining;
409 num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
410 dd->unaligned_len = bytes_to_send % dd->burst_size;
411 num_rows = bytes_to_send / dd->burst_size;
412
413 dd->mode = SPI_DMOV_MODE;
414
415 if (num_rows) {
416 /* src in 16 MSB, dst in 16 LSB */
417 box = &dd->tx_dmov_cmd->box;
418 box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
419 box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
420 box->num_rows = (num_rows << 16) | num_rows;
421 box->row_offset = (dd->burst_size << 16) | 0;
422
423 box = &dd->rx_dmov_cmd->box;
424 box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
425 box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
426 box->num_rows = (num_rows << 16) | num_rows;
427 box->row_offset = (0 << 16) | dd->burst_size;
428
429 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
430 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
431 offsetof(struct spi_dmov_cmd, box));
432 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
433 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
434 offsetof(struct spi_dmov_cmd, box));
435 } else {
436 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
437 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
438 offsetof(struct spi_dmov_cmd, single_pad));
439 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
440 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
441 offsetof(struct spi_dmov_cmd, single_pad));
442 }
443
444 if (!dd->unaligned_len) {
445 dd->tx_dmov_cmd->box.cmd |= CMD_LC;
446 dd->rx_dmov_cmd->box.cmd |= CMD_LC;
447 } else {
448 dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
449 dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
450 u32 offset = dd->cur_transfer->len - dd->unaligned_len;
451
452 if ((dd->multi_xfr) && (dd->read_len <= 0))
453 offset = dd->cur_msg_len - dd->unaligned_len;
454
455 dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
456 dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;
457
458 memset(dd->tx_padding, 0, dd->burst_size);
459 memset(dd->rx_padding, 0, dd->burst_size);
460 if (dd->write_buf)
461 memcpy(dd->tx_padding, dd->write_buf + offset,
462 dd->unaligned_len);
463
464 tx_cmd->src = dd->tx_padding_dma;
465 rx_cmd->dst = dd->rx_padding_dma;
466 tx_cmd->len = rx_cmd->len = dd->burst_size;
467 }
 468 /* This also takes care of the padding dummy buf.
 469 Since the count is set to the correct length, the
 470 dummy bytes won't actually be sent. */
471 if (dd->multi_xfr) {
472 u32 write_transfers = 0;
473 u32 read_transfers = 0;
474
475 if (dd->write_len > 0) {
476 write_transfers = DIV_ROUND_UP(dd->write_len,
477 dd->bytes_per_word);
478 writel_relaxed(write_transfers,
479 dd->base + SPI_MX_OUTPUT_COUNT);
480 }
481 if (dd->read_len > 0) {
482 /*
483 * The read following a write transfer must take
484 * into account, that the bytes pertaining to
485 * the write transfer needs to be discarded,
486 * before the actual read begins.
487 */
488 read_transfers = DIV_ROUND_UP(dd->read_len +
489 dd->write_len,
490 dd->bytes_per_word);
491 writel_relaxed(read_transfers,
492 dd->base + SPI_MX_INPUT_COUNT);
493 }
494 } else {
495 if (dd->write_buf)
496 writel_relaxed(num_transfers,
497 dd->base + SPI_MX_OUTPUT_COUNT);
498 if (dd->read_buf)
499 writel_relaxed(num_transfers,
500 dd->base + SPI_MX_INPUT_COUNT);
501 }
502}
503
504static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
505{
506 dma_coherent_pre_ops();
507 if (dd->write_buf)
508 msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
509 if (dd->read_buf)
510 msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
511}
512
 513/* The SPI core can send a maximum of 4K per transfer, because there is a
 514 HW problem with infinite mode.
 515 Therefore, we send several chunks of 3K or less (depending on how much
 516 is left).
 517 Upon completion we send the next chunk, or complete the transfer if
 518 everything is finished.
519*/
520static int msm_spi_dm_send_next(struct msm_spi *dd)
521{
522 /* By now we should have sent all the bytes in FIFO mode,
523 * However to make things right, we'll check anyway.
524 */
525 if (dd->mode != SPI_DMOV_MODE)
526 return 0;
527
528 /* We need to send more chunks, if we sent max last time */
529 if (dd->tx_bytes_remaining > SPI_MAX_LEN) {
530 dd->tx_bytes_remaining -= SPI_MAX_LEN;
531 if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
532 return 0;
533 dd->read_len = dd->write_len = 0;
534 msm_spi_setup_dm_transfer(dd);
535 msm_spi_enqueue_dm_commands(dd);
536 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
537 return 0;
538 return 1;
539 } else if (dd->read_len && dd->write_len) {
540 dd->tx_bytes_remaining -= dd->cur_transfer->len;
541 if (list_is_last(&dd->cur_transfer->transfer_list,
542 &dd->cur_msg->transfers))
543 return 0;
544 get_next_transfer(dd);
545 if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
546 return 0;
547 dd->tx_bytes_remaining = dd->read_len + dd->write_len;
548 dd->read_buf = dd->temp_buf;
549 dd->read_len = dd->write_len = -1;
550 msm_spi_setup_dm_transfer(dd);
551 msm_spi_enqueue_dm_commands(dd);
552 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
553 return 0;
554 return 1;
555 }
556 return 0;
557}
558
559static inline void msm_spi_ack_transfer(struct msm_spi *dd)
560{
561 writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
562 SPI_OP_MAX_OUTPUT_DONE_FLAG,
563 dd->base + SPI_OPERATIONAL);
564 /* Ensure done flag was cleared before proceeding further */
565 mb();
566}
567
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700568/* Figure out which IRQ occurred and call the relevant functions */
569static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
570{
571 u32 op, ret = IRQ_NONE;
572 struct msm_spi *dd = dev_id;
573
574 if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
575 readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
576 struct spi_master *master = dev_get_drvdata(dd->dev);
577 ret |= msm_spi_error_irq(irq, master);
578 }
579
580 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
581 if (op & SPI_OP_INPUT_SERVICE_FLAG) {
582 writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
583 dd->base + SPI_OPERATIONAL);
584 /*
585 * Ensure service flag was cleared before further
586 * processing of interrupt.
587 */
588 mb();
589 ret |= msm_spi_input_irq(irq, dev_id);
590 }
591
592 if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
593 writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
594 dd->base + SPI_OPERATIONAL);
595 /*
596 * Ensure service flag was cleared before further
597 * processing of interrupt.
598 */
599 mb();
600 ret |= msm_spi_output_irq(irq, dev_id);
601 }
602
603 if (dd->done) {
604 complete(&dd->transfer_complete);
605 dd->done = 0;
606 }
607 return ret;
608}
609
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700610static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
611{
612 struct msm_spi *dd = dev_id;
613
614 dd->stat_rx++;
615
616 if (dd->mode == SPI_MODE_NONE)
617 return IRQ_HANDLED;
618
619 if (dd->mode == SPI_DMOV_MODE) {
620 u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
621 if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
622 (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
623 msm_spi_ack_transfer(dd);
624 if (dd->unaligned_len == 0) {
625 if (atomic_inc_return(&dd->rx_irq_called) == 1)
626 return IRQ_HANDLED;
627 }
628 msm_spi_complete(dd);
629 return IRQ_HANDLED;
630 }
631 return IRQ_NONE;
632 }
633
634 if (dd->mode == SPI_FIFO_MODE) {
635 while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
636 SPI_OP_IP_FIFO_NOT_EMPTY) &&
637 (dd->rx_bytes_remaining > 0)) {
638 msm_spi_read_word_from_fifo(dd);
639 }
640 if (dd->rx_bytes_remaining == 0)
641 msm_spi_complete(dd);
642 }
643
644 return IRQ_HANDLED;
645}
646
647static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
648{
649 u32 word;
650 u8 byte;
651 int i;
652
653 word = 0;
654 if (dd->write_buf) {
655 for (i = 0; (i < dd->bytes_per_word) &&
656 dd->tx_bytes_remaining; i++) {
657 dd->tx_bytes_remaining--;
658 byte = *dd->write_buf++;
659 word |= (byte << (BITS_PER_BYTE * (3 - i)));
660 }
661 } else
662 if (dd->tx_bytes_remaining > dd->bytes_per_word)
663 dd->tx_bytes_remaining -= dd->bytes_per_word;
664 else
665 dd->tx_bytes_remaining = 0;
666 dd->write_xfr_cnt++;
667 if (dd->multi_xfr) {
668 if (!dd->tx_bytes_remaining)
669 dd->write_xfr_cnt = 0;
670 else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
671 dd->write_len) {
672 struct spi_transfer *t = dd->cur_tx_transfer;
673 if (t->transfer_list.next != &dd->cur_msg->transfers) {
674 t = list_entry(t->transfer_list.next,
675 struct spi_transfer,
676 transfer_list);
677 dd->write_buf = t->tx_buf;
678 dd->write_len = t->len;
679 dd->write_xfr_cnt = 0;
680 dd->cur_tx_transfer = t;
681 }
682 }
683 }
684 writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
685}
686
687static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
688{
689 int count = 0;
690
691 while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
692 !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
693 SPI_OP_OUTPUT_FIFO_FULL)) {
694 msm_spi_write_word_to_fifo(dd);
695 count++;
696 }
697}
698
699static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
700{
701 struct msm_spi *dd = dev_id;
702
703 dd->stat_tx++;
704
705 if (dd->mode == SPI_MODE_NONE)
706 return IRQ_HANDLED;
707
708 if (dd->mode == SPI_DMOV_MODE) {
 709 /* TX_ONLY transactions are handled here.
 710 This is the only place we complete on TX rather than RX. */
711 if (dd->read_buf == NULL &&
712 readl_relaxed(dd->base + SPI_OPERATIONAL) &
713 SPI_OP_MAX_OUTPUT_DONE_FLAG) {
714 msm_spi_ack_transfer(dd);
715 msm_spi_complete(dd);
716 return IRQ_HANDLED;
717 }
718 return IRQ_NONE;
719 }
720
721 /* Output FIFO is empty. Transmit any outstanding write data. */
722 if (dd->mode == SPI_FIFO_MODE)
723 msm_spi_write_rmn_to_fifo(dd);
724
725 return IRQ_HANDLED;
726}
727
728static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
729{
730 struct spi_master *master = dev_id;
731 struct msm_spi *dd = spi_master_get_devdata(master);
732 u32 spi_err;
733
734 spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
735 if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
736 dev_warn(master->dev.parent, "SPI output overrun error\n");
737 if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
738 dev_warn(master->dev.parent, "SPI input underrun error\n");
739 if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
740 dev_warn(master->dev.parent, "SPI output underrun error\n");
741 msm_spi_get_clk_err(dd, &spi_err);
742 if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
743 dev_warn(master->dev.parent, "SPI clock overrun error\n");
744 if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
745 dev_warn(master->dev.parent, "SPI clock underrun error\n");
746 msm_spi_clear_error_flags(dd);
747 msm_spi_ack_clk_err(dd);
748 /* Ensure clearing of QUP_ERROR_FLAGS was completed */
749 mb();
750 return IRQ_HANDLED;
751}
752
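/*
 * DMA-map the client buffers for the current transfer. For WR-WR and
 * WR-RD combinations a temporary DMA-able buffer is allocated and used
 * instead of (or in addition to) the client buffers, with the client
 * data copied in/out around the transfer.
 */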
753static int msm_spi_map_dma_buffers(struct msm_spi *dd)
754{
755 struct device *dev;
756 struct spi_transfer *first_xfr;
Jordan Crouse47b3f832011-09-19 11:21:16 -0600757 struct spi_transfer *nxt_xfr = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700758 void *tx_buf, *rx_buf;
759 unsigned tx_len, rx_len;
760 int ret = -EINVAL;
761
762 dev = &dd->cur_msg->spi->dev;
763 first_xfr = dd->cur_transfer;
764 tx_buf = (void *)first_xfr->tx_buf;
765 rx_buf = first_xfr->rx_buf;
766 tx_len = rx_len = first_xfr->len;
767
768 /*
769 * For WR-WR and WR-RD transfers, we allocate our own temporary
770 * buffer and copy the data to/from the client buffers.
771 */
772 if (dd->multi_xfr) {
773 dd->temp_buf = kzalloc(dd->cur_msg_len,
774 GFP_KERNEL | __GFP_DMA);
775 if (!dd->temp_buf)
776 return -ENOMEM;
777 nxt_xfr = list_entry(first_xfr->transfer_list.next,
778 struct spi_transfer, transfer_list);
779
780 if (dd->write_len && !dd->read_len) {
781 if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
782 goto error;
783
784 memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
785 memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
786 nxt_xfr->len);
787 tx_buf = dd->temp_buf;
788 tx_len = dd->cur_msg_len;
789 } else {
790 if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
791 goto error;
792
793 rx_buf = dd->temp_buf;
794 rx_len = dd->cur_msg_len;
795 }
796 }
797 if (tx_buf != NULL) {
798 first_xfr->tx_dma = dma_map_single(dev, tx_buf,
799 tx_len, DMA_TO_DEVICE);
800 if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
801 dev_err(dev, "dma %cX %d bytes error\n",
802 'T', tx_len);
803 ret = -ENOMEM;
804 goto error;
805 }
806 }
807 if (rx_buf != NULL) {
808 dma_addr_t dma_handle;
809 dma_handle = dma_map_single(dev, rx_buf,
810 rx_len, DMA_FROM_DEVICE);
811 if (dma_mapping_error(NULL, dma_handle)) {
812 dev_err(dev, "dma %cX %d bytes error\n",
813 'R', rx_len);
814 if (tx_buf != NULL)
815 dma_unmap_single(NULL, first_xfr->tx_dma,
816 tx_len, DMA_TO_DEVICE);
817 ret = -ENOMEM;
818 goto error;
819 }
820 if (dd->multi_xfr)
821 nxt_xfr->rx_dma = dma_handle;
822 else
823 first_xfr->rx_dma = dma_handle;
824 }
825 return 0;
826
827error:
828 kfree(dd->temp_buf);
829 dd->temp_buf = NULL;
830 return ret;
831}
832
833static void msm_spi_unmap_dma_buffers(struct msm_spi *dd)
834{
835 struct device *dev;
836 u32 offset;
837
838 dev = &dd->cur_msg->spi->dev;
839 if (dd->cur_msg->is_dma_mapped)
840 goto unmap_end;
841
842 if (dd->multi_xfr) {
843 if (dd->write_len && !dd->read_len) {
844 dma_unmap_single(dev,
845 dd->cur_transfer->tx_dma,
846 dd->cur_msg_len,
847 DMA_TO_DEVICE);
848 } else {
849 struct spi_transfer *prev_xfr;
850 prev_xfr = list_entry(
851 dd->cur_transfer->transfer_list.prev,
852 struct spi_transfer,
853 transfer_list);
854 if (dd->cur_transfer->rx_buf) {
855 dma_unmap_single(dev,
856 dd->cur_transfer->rx_dma,
857 dd->cur_msg_len,
858 DMA_FROM_DEVICE);
859 }
860 if (prev_xfr->tx_buf) {
861 dma_unmap_single(dev,
862 prev_xfr->tx_dma,
863 prev_xfr->len,
864 DMA_TO_DEVICE);
865 }
866 if (dd->unaligned_len && dd->read_buf) {
867 offset = dd->cur_msg_len - dd->unaligned_len;
868 dma_coherent_post_ops();
869 memcpy(dd->read_buf + offset, dd->rx_padding,
870 dd->unaligned_len);
871 memcpy(dd->cur_transfer->rx_buf,
872 dd->read_buf + prev_xfr->len,
873 dd->cur_transfer->len);
874 }
875 }
876 kfree(dd->temp_buf);
877 dd->temp_buf = NULL;
878 return;
879 } else {
880 if (dd->cur_transfer->rx_buf)
881 dma_unmap_single(dev, dd->cur_transfer->rx_dma,
882 dd->cur_transfer->len,
883 DMA_FROM_DEVICE);
884 if (dd->cur_transfer->tx_buf)
885 dma_unmap_single(dev, dd->cur_transfer->tx_dma,
886 dd->cur_transfer->len,
887 DMA_TO_DEVICE);
888 }
889
890unmap_end:
891 /* If we padded the transfer, we copy it from the padding buf */
892 if (dd->unaligned_len && dd->read_buf) {
893 offset = dd->cur_transfer->len - dd->unaligned_len;
894 dma_coherent_post_ops();
895 memcpy(dd->read_buf + offset, dd->rx_padding,
896 dd->unaligned_len);
897 }
898}
899
900/**
901 * msm_use_dm - decides whether to use data mover for this
902 * transfer
903 * @dd: device
904 * @tr: transfer
905 *
906 * Start using DM if:
907 * 1. Transfer is longer than 3*block size.
 908 * 2. Buffers are aligned to the cache line.
 909 * 3. For WR-RD or WR-WR transfers, conditions (1) and (2) above are met.
910 */
911static inline int msm_use_dm(struct msm_spi *dd, struct spi_transfer *tr,
912 u8 bpw)
913{
914 u32 cache_line = dma_get_cache_alignment();
915
916 if (!dd->use_dma)
917 return 0;
918
919 if (dd->cur_msg_len < 3*dd->input_block_size)
920 return 0;
921
922 if (dd->multi_xfr && !dd->read_len && !dd->write_len)
923 return 0;
924
925 if (tr->tx_buf) {
926 if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
927 return 0;
928 }
929 if (tr->rx_buf) {
930 if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
931 return 0;
932 }
933
934 if (tr->cs_change &&
 935 ((bpw != 8) && (bpw != 16) && (bpw != 32)))
936 return 0;
937 return 1;
938}
939
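/*
 * Execute the current transfer: pick bits-per-word and clock speed,
 * choose FIFO or DM mode, program the QUP registers accordingly and wait
 * for completion with a timeout proportional to the transfer length
 * (roughly 100x the nominal wire time). DM channels are flushed on
 * timeout.
 */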
940static void msm_spi_process_transfer(struct msm_spi *dd)
941{
942 u8 bpw;
943 u32 spi_ioc;
944 u32 spi_iom;
945 u32 spi_ioc_orig;
946 u32 max_speed;
947 u32 chip_select;
948 u32 read_count;
949 u32 timeout;
950 u32 int_loopback = 0;
951
952 dd->tx_bytes_remaining = dd->cur_msg_len;
953 dd->rx_bytes_remaining = dd->cur_msg_len;
954 dd->read_buf = dd->cur_transfer->rx_buf;
955 dd->write_buf = dd->cur_transfer->tx_buf;
956 init_completion(&dd->transfer_complete);
957 if (dd->cur_transfer->bits_per_word)
958 bpw = dd->cur_transfer->bits_per_word;
959 else
960 if (dd->cur_msg->spi->bits_per_word)
961 bpw = dd->cur_msg->spi->bits_per_word;
962 else
963 bpw = 8;
964 dd->bytes_per_word = (bpw + 7) / 8;
965
966 if (dd->cur_transfer->speed_hz)
967 max_speed = dd->cur_transfer->speed_hz;
968 else
969 max_speed = dd->cur_msg->spi->max_speed_hz;
970 if (!dd->clock_speed || max_speed != dd->clock_speed)
971 msm_spi_clock_set(dd, max_speed);
972
973 read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
974 if (dd->cur_msg->spi->mode & SPI_LOOP)
975 int_loopback = 1;
976 if (int_loopback && dd->multi_xfr &&
977 (read_count > dd->input_fifo_size)) {
978 if (dd->read_len && dd->write_len)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700979 pr_err(
 980 "%s: Internal Loopback does not support > fifo size "
 981 "for write-then-read transactions\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700982 __func__);
983 else if (dd->write_len && !dd->read_len)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700984 pr_err(
 985 "%s: Internal Loopback does not support > fifo size "
 986 "for write-then-write transactions\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700987 __func__);
988 return;
989 }
990 if (!msm_use_dm(dd, dd->cur_transfer, bpw)) {
991 dd->mode = SPI_FIFO_MODE;
992 if (dd->multi_xfr) {
993 dd->read_len = dd->cur_transfer->len;
994 dd->write_len = dd->cur_transfer->len;
995 }
996 /* read_count cannot exceed fifo_size, and only one READ COUNT
997 interrupt is generated per transaction, so for transactions
998 larger than fifo size READ COUNT must be disabled.
999 For those transactions we usually move to Data Mover mode.
1000 */
1001 if (read_count <= dd->input_fifo_size) {
1002 writel_relaxed(read_count,
1003 dd->base + SPI_MX_READ_COUNT);
1004 msm_spi_set_write_count(dd, read_count);
1005 } else {
1006 writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
1007 msm_spi_set_write_count(dd, 0);
1008 }
1009 } else {
1010 dd->mode = SPI_DMOV_MODE;
1011 if (dd->write_len && dd->read_len) {
1012 dd->tx_bytes_remaining = dd->write_len;
1013 dd->rx_bytes_remaining = dd->read_len;
1014 }
1015 }
1016
1017 /* Write mode - fifo or data mover*/
1018 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
1019 spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
1020 spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
1021 spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
1022 /* Turn on packing for data mover */
1023 if (dd->mode == SPI_DMOV_MODE)
1024 spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
1025 else
1026 spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
1027 writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
1028
1029 msm_spi_set_config(dd, bpw);
1030
1031 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1032 spi_ioc_orig = spi_ioc;
1033 if (dd->cur_msg->spi->mode & SPI_CPOL)
1034 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1035 else
1036 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1037 chip_select = dd->cur_msg->spi->chip_select << 2;
1038 if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
1039 spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
1040 if (!dd->cur_transfer->cs_change)
1041 spi_ioc |= SPI_IO_C_MX_CS_MODE;
1042 if (spi_ioc != spi_ioc_orig)
1043 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1044
1045 if (dd->mode == SPI_DMOV_MODE) {
1046 msm_spi_setup_dm_transfer(dd);
1047 msm_spi_enqueue_dm_commands(dd);
1048 }
1049 /* The output fifo interrupt handler will handle all writes after
1050 the first. Restricting this to one write avoids contention
1051 issues and race conditions between this thread and the int handler
1052 */
1053 else if (dd->mode == SPI_FIFO_MODE) {
1054 if (msm_spi_prepare_for_write(dd))
1055 goto transfer_end;
1056 msm_spi_start_write(dd, read_count);
1057 }
1058
1059 /* Only enter the RUN state after the first word is written into
1060 the output FIFO. Otherwise, the output FIFO EMPTY interrupt
1061 might fire before the first word is written resulting in a
1062 possible race condition.
1063 */
1064 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
1065 goto transfer_end;
1066
1067 timeout = 100 * msecs_to_jiffies(
1068 DIV_ROUND_UP(dd->cur_msg_len * 8,
1069 DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
1070
1071 /* Assume success, this might change later upon transaction result */
1072 dd->cur_msg->status = 0;
1073 do {
1074 if (!wait_for_completion_timeout(&dd->transfer_complete,
1075 timeout)) {
1076 dev_err(dd->dev, "%s: SPI transaction "
1077 "timeout\n", __func__);
1078 dd->cur_msg->status = -EIO;
1079 if (dd->mode == SPI_DMOV_MODE) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001080 msm_dmov_flush(dd->tx_dma_chan, 1);
1081 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001082 }
1083 break;
1084 }
1085 } while (msm_spi_dm_send_next(dd));
1086
1087transfer_end:
1088 if (dd->mode == SPI_DMOV_MODE)
1089 msm_spi_unmap_dma_buffers(dd);
1090 dd->mode = SPI_MODE_NONE;
1091
1092 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
1093 writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
1094 dd->base + SPI_IO_CONTROL);
1095}
1096
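/*
 * Scan the message and compute the total length. For two-transfer
 * messages that start with a write, read_len/write_len are recorded so
 * the WR-WR / WR-RD handling can be used.
 */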
1097static void get_transfer_length(struct msm_spi *dd)
1098{
1099 struct spi_transfer *tr;
1100 int num_xfrs = 0;
1101 int readlen = 0;
1102 int writelen = 0;
1103
1104 dd->cur_msg_len = 0;
1105 dd->multi_xfr = 0;
1106 dd->read_len = dd->write_len = 0;
1107
1108 list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
1109 if (tr->tx_buf)
1110 writelen += tr->len;
1111 if (tr->rx_buf)
1112 readlen += tr->len;
1113 dd->cur_msg_len += tr->len;
1114 num_xfrs++;
1115 }
1116
1117 if (num_xfrs == 2) {
1118 struct spi_transfer *first_xfr = dd->cur_transfer;
1119
1120 dd->multi_xfr = 1;
1121 tr = list_entry(first_xfr->transfer_list.next,
1122 struct spi_transfer,
1123 transfer_list);
1124 /*
1125 * We update dd->read_len and dd->write_len only
1126 * for WR-WR and WR-RD transfers.
1127 */
1128 if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
1129 if (((tr->tx_buf) && (!tr->rx_buf)) ||
1130 ((!tr->tx_buf) && (tr->rx_buf))) {
1131 dd->read_len = readlen;
1132 dd->write_len = writelen;
1133 }
1134 }
1135 } else if (num_xfrs > 1)
1136 dd->multi_xfr = 1;
1137}
1138
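/*
 * Group consecutive transfers that share the same cs_change setting into
 * one logical transfer; returns the number of transfers grouped and
 * accumulates their total length in cur_msg_len.
 */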
1139static inline int combine_transfers(struct msm_spi *dd)
1140{
1141 struct spi_transfer *t = dd->cur_transfer;
1142 struct spi_transfer *nxt;
1143 int xfrs_grped = 1;
1144
1145 dd->cur_msg_len = dd->cur_transfer->len;
1146 while (t->transfer_list.next != &dd->cur_msg->transfers) {
1147 nxt = list_entry(t->transfer_list.next,
1148 struct spi_transfer,
1149 transfer_list);
1150 if (t->cs_change != nxt->cs_change)
1151 return xfrs_grped;
1152 dd->cur_msg_len += nxt->len;
1153 xfrs_grped++;
1154 t = nxt;
1155 }
1156 return xfrs_grped;
1157}
1158
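/*
 * Set or clear SPI_IO_C_FORCE_CS in SPI_IO_CONTROL; the register is only
 * rewritten when the value actually changes. Used on the dd->qup_ver
 * path to hold chip-select across grouped transfers.
 */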
Harini Jayaraman093938a2012-04-20 15:33:23 -06001159static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
1160{
1161 u32 spi_ioc;
1162 u32 spi_ioc_orig;
1163
1164 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1165 spi_ioc_orig = spi_ioc;
1166 if (set_flag)
1167 spi_ioc |= SPI_IO_C_FORCE_CS;
1168 else
1169 spi_ioc &= ~SPI_IO_C_FORCE_CS;
1170
1171 if (spi_ioc != spi_ioc_orig)
1172 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1173}
1174
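/*
 * Process one queued spi_message. A chip-select GPIO is claimed on
 * demand (except in loopback mode). On newer QUP hardware each transfer
 * is processed individually with FORCE_CS handling; otherwise transfers
 * are combined and run through the FIFO/DM paths.
 */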
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001175static void msm_spi_process_message(struct msm_spi *dd)
1176{
1177 int xfrs_grped = 0;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001178 int cs_num;
1179 int rc;
1180
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001181 dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001182 cs_num = dd->cur_msg->spi->chip_select;
1183 if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
1184 (!(dd->cs_gpios[cs_num].valid)) &&
1185 (dd->cs_gpios[cs_num].gpio_num >= 0)) {
1186 rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
1187 spi_cs_rsrcs[cs_num]);
1188 if (rc) {
1189 dev_err(dd->dev, "gpio_request for pin %d failed with "
1190 "error %d\n", dd->cs_gpios[cs_num].gpio_num,
1191 rc);
1192 return;
1193 }
1194 dd->cs_gpios[cs_num].valid = 1;
1195 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001196
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001197 if (dd->qup_ver) {
Harini Jayaraman093938a2012-04-20 15:33:23 -06001198 write_force_cs(dd, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001199 list_for_each_entry(dd->cur_transfer,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001200 &dd->cur_msg->transfers,
1201 transfer_list) {
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001202 struct spi_transfer *t = dd->cur_transfer;
1203 struct spi_transfer *nxt;
1204
1205 if (t->transfer_list.next != &dd->cur_msg->transfers) {
1206 nxt = list_entry(t->transfer_list.next,
1207 struct spi_transfer,
1208 transfer_list);
1209
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001210 if (t->cs_change == nxt->cs_change)
Harini Jayaraman093938a2012-04-20 15:33:23 -06001211 write_force_cs(dd, 1);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001212 else
Harini Jayaraman093938a2012-04-20 15:33:23 -06001213 write_force_cs(dd, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001214 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001215
1216 dd->cur_msg_len = dd->cur_transfer->len;
1217 msm_spi_process_transfer(dd);
1218 }
1219 } else {
1220 dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
1221 struct spi_transfer,
1222 transfer_list);
1223 get_transfer_length(dd);
1224 if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
1225 /*
1226 * Handling of multi-transfers.
1227 * FIFO mode is used by default
1228 */
1229 list_for_each_entry(dd->cur_transfer,
1230 &dd->cur_msg->transfers,
1231 transfer_list) {
1232 if (!dd->cur_transfer->len)
1233 goto error;
1234 if (xfrs_grped) {
1235 xfrs_grped--;
1236 continue;
1237 } else {
1238 dd->read_len = dd->write_len = 0;
1239 xfrs_grped = combine_transfers(dd);
1240 }
1241
1242 dd->cur_tx_transfer = dd->cur_transfer;
1243 dd->cur_rx_transfer = dd->cur_transfer;
1244 msm_spi_process_transfer(dd);
1245 xfrs_grped--;
1246 }
1247 } else {
1248 /* Handling of a single transfer or
1249 * WR-WR or WR-RD transfers
1250 */
1251 if ((!dd->cur_msg->is_dma_mapped) &&
1252 (msm_use_dm(dd, dd->cur_transfer,
1253 dd->cur_transfer->bits_per_word))) {
1254 /* Mapping of DMA buffers */
1255 int ret = msm_spi_map_dma_buffers(dd);
1256 if (ret < 0) {
1257 dd->cur_msg->status = ret;
1258 goto error;
1259 }
1260 }
1261
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001262 dd->cur_tx_transfer = dd->cur_transfer;
1263 dd->cur_rx_transfer = dd->cur_transfer;
1264 msm_spi_process_transfer(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001265 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001266 }
Harini Jayaramane4c06192011-09-28 16:26:39 -06001267
1268 return;
1269
1270error:
1271 if (dd->cs_gpios[cs_num].valid) {
1272 gpio_free(dd->cs_gpios[cs_num].gpio_num);
1273 dd->cs_gpios[cs_num].valid = 0;
1274 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001275}
1276
1277/* workqueue - pull messages from queue & process */
1278static void msm_spi_workq(struct work_struct *work)
1279{
1280 struct msm_spi *dd =
1281 container_of(work, struct msm_spi, work_data);
1282 unsigned long flags;
1283 u32 status_error = 0;
1284
1285 mutex_lock(&dd->core_lock);
1286
1287 /* Don't allow power collapse until we release mutex */
1288 if (pm_qos_request_active(&qos_req_list))
1289 pm_qos_update_request(&qos_req_list,
1290 dd->pm_lat);
1291 if (dd->use_rlock)
1292 remote_mutex_lock(&dd->r_lock);
1293
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001294 clk_prepare_enable(dd->clk);
1295 clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001296 msm_spi_enable_irqs(dd);
1297
1298 if (!msm_spi_is_valid_state(dd)) {
1299 dev_err(dd->dev, "%s: SPI operational state not valid\n",
1300 __func__);
1301 status_error = 1;
1302 }
1303
1304 spin_lock_irqsave(&dd->queue_lock, flags);
1305 while (!list_empty(&dd->queue)) {
1306 dd->cur_msg = list_entry(dd->queue.next,
1307 struct spi_message, queue);
1308 list_del_init(&dd->cur_msg->queue);
1309 spin_unlock_irqrestore(&dd->queue_lock, flags);
1310 if (status_error)
1311 dd->cur_msg->status = -EIO;
1312 else
1313 msm_spi_process_message(dd);
1314 if (dd->cur_msg->complete)
1315 dd->cur_msg->complete(dd->cur_msg->context);
1316 spin_lock_irqsave(&dd->queue_lock, flags);
1317 }
1318 dd->transfer_pending = 0;
1319 spin_unlock_irqrestore(&dd->queue_lock, flags);
1320
1321 msm_spi_disable_irqs(dd);
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001322 clk_disable_unprepare(dd->clk);
1323 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001324
1325 if (dd->use_rlock)
1326 remote_mutex_unlock(&dd->r_lock);
1327
1328 if (pm_qos_request_active(&qos_req_list))
1329 pm_qos_update_request(&qos_req_list,
1330 PM_QOS_DEFAULT_VALUE);
1331
1332 mutex_unlock(&dd->core_lock);
1333 /* If needed, this can be done after the current message is complete,
1334 and work can be continued upon resume. No motivation for now. */
1335 if (dd->suspended)
1336 wake_up_interruptible(&dd->continue_suspend);
1337}
1338
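/*
 * spi_master->transfer hook: validate the message (clock rate, 4-32
 * bits per word, at least one buffer per transfer), queue it and kick
 * the workqueue. Returns -EBUSY while the controller is suspended.
 */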
1339static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
1340{
1341 struct msm_spi *dd;
1342 unsigned long flags;
1343 struct spi_transfer *tr;
1344
1345 dd = spi_master_get_devdata(spi->master);
1346 if (dd->suspended)
1347 return -EBUSY;
1348
1349 if (list_empty(&msg->transfers) || !msg->complete)
1350 return -EINVAL;
1351
1352 list_for_each_entry(tr, &msg->transfers, transfer_list) {
1353 /* Check message parameters */
1354 if (tr->speed_hz > dd->pdata->max_clock_speed ||
1355 (tr->bits_per_word &&
1356 (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
1357 (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
 1358 dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw, "
1359 "tx=%p, rx=%p\n",
1360 tr->speed_hz, tr->bits_per_word,
1361 tr->tx_buf, tr->rx_buf);
1362 return -EINVAL;
1363 }
1364 }
1365
1366 spin_lock_irqsave(&dd->queue_lock, flags);
1367 if (dd->suspended) {
1368 spin_unlock_irqrestore(&dd->queue_lock, flags);
1369 return -EBUSY;
1370 }
1371 dd->transfer_pending = 1;
1372 list_add_tail(&msg->queue, &dd->queue);
1373 spin_unlock_irqrestore(&dd->queue_lock, flags);
1374 queue_work(dd->workqueue, &dd->work_data);
1375 return 0;
1376}
1377
1378static int msm_spi_setup(struct spi_device *spi)
1379{
1380 struct msm_spi *dd;
1381 int rc = 0;
1382 u32 spi_ioc;
1383 u32 spi_config;
1384 u32 mask;
1385
1386 if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
1387 dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
1388 __func__, spi->bits_per_word);
1389 rc = -EINVAL;
1390 }
1391 if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
1392 dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
1393 __func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
1394 rc = -EINVAL;
1395 }
1396
1397 if (rc)
1398 goto err_setup_exit;
1399
1400 dd = spi_master_get_devdata(spi->master);
1401
1402 mutex_lock(&dd->core_lock);
1403 if (dd->suspended) {
1404 mutex_unlock(&dd->core_lock);
1405 return -EBUSY;
1406 }
1407
1408 if (dd->use_rlock)
1409 remote_mutex_lock(&dd->r_lock);
1410
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001411 clk_prepare_enable(dd->clk);
1412 clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001413
1414 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1415 mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
1416 if (spi->mode & SPI_CS_HIGH)
1417 spi_ioc |= mask;
1418 else
1419 spi_ioc &= ~mask;
1420 if (spi->mode & SPI_CPOL)
1421 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1422 else
1423 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1424
1425 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1426
1427 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
1428 if (spi->mode & SPI_LOOP)
1429 spi_config |= SPI_CFG_LOOPBACK;
1430 else
1431 spi_config &= ~SPI_CFG_LOOPBACK;
1432 if (spi->mode & SPI_CPHA)
1433 spi_config &= ~SPI_CFG_INPUT_FIRST;
1434 else
1435 spi_config |= SPI_CFG_INPUT_FIRST;
1436 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
1437
1438 /* Ensure previous write completed before disabling the clocks */
1439 mb();
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001440 clk_disable_unprepare(dd->clk);
1441 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001442
1443 if (dd->use_rlock)
1444 remote_mutex_unlock(&dd->r_lock);
1445 mutex_unlock(&dd->core_lock);
1446
1447err_setup_exit:
1448 return rc;
1449}
1450
1451#ifdef CONFIG_DEBUG_FS
1452static int debugfs_iomem_x32_set(void *data, u64 val)
1453{
1454 writel_relaxed(val, data);
1455 /* Ensure the previous write completed. */
1456 mb();
1457 return 0;
1458}
1459
1460static int debugfs_iomem_x32_get(void *data, u64 *val)
1461{
1462 *val = readl_relaxed(data);
1463 /* Ensure the previous read completed. */
1464 mb();
1465 return 0;
1466}
1467
1468DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
1469 debugfs_iomem_x32_set, "0x%08llx\n");
1470
1471static void spi_debugfs_init(struct msm_spi *dd)
1472{
1473 dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
1474 if (dd->dent_spi) {
1475 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001476
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001477 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
1478 dd->debugfs_spi_regs[i] =
1479 debugfs_create_file(
1480 debugfs_spi_regs[i].name,
1481 debugfs_spi_regs[i].mode,
1482 dd->dent_spi,
1483 dd->base + debugfs_spi_regs[i].offset,
1484 &fops_iomem_x32);
1485 }
1486 }
1487}
1488
1489static void spi_debugfs_exit(struct msm_spi *dd)
1490{
1491 if (dd->dent_spi) {
1492 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001493
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001494 debugfs_remove_recursive(dd->dent_spi);
1495 dd->dent_spi = NULL;
1496 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
1497 dd->debugfs_spi_regs[i] = NULL;
1498 }
1499}
1500#else
1501static void spi_debugfs_init(struct msm_spi *dd) {}
1502static void spi_debugfs_exit(struct msm_spi *dd) {}
1503#endif
1504
1505/* ===Device attributes begin=== */
1506static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
1507 char *buf)
1508{
1509 struct spi_master *master = dev_get_drvdata(dev);
1510 struct msm_spi *dd = spi_master_get_devdata(master);
1511
1512 return snprintf(buf, PAGE_SIZE,
1513 "Device %s\n"
1514 "rx fifo_size = %d spi words\n"
1515 "tx fifo_size = %d spi words\n"
1516 "use_dma ? %s\n"
1517 "rx block size = %d bytes\n"
1518 "tx block size = %d bytes\n"
1519 "burst size = %d bytes\n"
1520 "DMA configuration:\n"
1521 "tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
1522 "--statistics--\n"
1523 "Rx isrs = %d\n"
1524 "Tx isrs = %d\n"
1525 "DMA error = %d\n"
1526 "--debug--\n"
1527 "NA yet\n",
1528 dev_name(dev),
1529 dd->input_fifo_size,
1530 dd->output_fifo_size,
1531 dd->use_dma ? "yes" : "no",
1532 dd->input_block_size,
1533 dd->output_block_size,
1534 dd->burst_size,
1535 dd->tx_dma_chan,
1536 dd->rx_dma_chan,
1537 dd->tx_dma_crci,
1538 dd->rx_dma_crci,
1539 dd->stat_rx + dd->stat_dmov_rx,
1540 dd->stat_tx + dd->stat_dmov_tx,
1541 dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
1542 );
1543}
1544
1545/* Reset statistics on write */
1546static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
1547 const char *buf, size_t count)
1548{
1549 struct msm_spi *dd = dev_get_drvdata(dev);
1550 dd->stat_rx = 0;
1551 dd->stat_tx = 0;
1552 dd->stat_dmov_rx = 0;
1553 dd->stat_dmov_tx = 0;
1554 dd->stat_dmov_rx_err = 0;
1555 dd->stat_dmov_tx_err = 0;
1556 return count;
1557}
1558
1559static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
1560
1561static struct attribute *dev_attrs[] = {
1562 &dev_attr_stats.attr,
1563 NULL,
1564};
1565
1566static struct attribute_group dev_attr_grp = {
1567 .attrs = dev_attrs,
1568};
1569/* ===Device attributes end=== */
1570
1571/**
1572 * spi_dmov_tx_complete_func - DataMover tx completion callback
1573 *
1574 * Executed in IRQ context (Data Mover's IRQ) DataMover's
1575 * spinlock @msm_dmov_lock held.
1576 */
1577static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
1578 unsigned int result,
1579 struct msm_dmov_errdata *err)
1580{
1581 struct msm_spi *dd;
1582
1583 if (!(result & DMOV_RSLT_VALID)) {
 1584 pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p\n", result, cmd);
1585 return;
1586 }
1587 /* restore original context */
1588 dd = container_of(cmd, struct msm_spi, tx_hdr);
1589 if (result & DMOV_RSLT_DONE)
1590 dd->stat_dmov_tx++;
1591 else {
1592 /* Error or flush */
1593 if (result & DMOV_RSLT_ERROR) {
1594 dev_err(dd->dev, "DMA error (0x%08x)\n", result);
1595 dd->stat_dmov_tx_err++;
1596 }
1597 if (result & DMOV_RSLT_FLUSH) {
1598 /*
1599 * Flushing normally happens in process of
1600 * removing, when we are waiting for outstanding
1601 * DMA commands to be flushed.
1602 */
1603 dev_info(dd->dev,
1604 "DMA channel flushed (0x%08x)\n", result);
1605 }
1606 if (err)
1607 dev_err(dd->dev,
1608 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
1609 err->flush[0], err->flush[1], err->flush[2],
1610 err->flush[3], err->flush[4], err->flush[5]);
1611 dd->cur_msg->status = -EIO;
1612 complete(&dd->transfer_complete);
1613 }
1614}
1615
1616/**
1617 * spi_dmov_rx_complete_func - DataMover rx completion callback
1618 *
1619 * Executed in IRQ context (Data Mover's IRQ)
1620 * DataMover's spinlock @msm_dmov_lock held.
1621 */
1622static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
1623 unsigned int result,
1624 struct msm_dmov_errdata *err)
1625{
1626 struct msm_spi *dd;
1627
1628 if (!(result & DMOV_RSLT_VALID)) {
1629 pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)",
1630 result, cmd);
1631 return;
1632 }
1633 /* restore original context */
1634 dd = container_of(cmd, struct msm_spi, rx_hdr);
1635 if (result & DMOV_RSLT_DONE) {
1636 dd->stat_dmov_rx++;
1637 if (atomic_inc_return(&dd->rx_irq_called) == 1)
1638 return;
1639 complete(&dd->transfer_complete);
1640 } else {
1641 /** Error or flush */
1642 if (result & DMOV_RSLT_ERROR) {
1643 dev_err(dd->dev, "DMA error(0x%08x)\n", result);
1644 dd->stat_dmov_rx_err++;
1645 }
1646 if (result & DMOV_RSLT_FLUSH) {
1647 dev_info(dd->dev,
1648 "DMA channel flushed(0x%08x)\n", result);
1649 }
1650 if (err)
1651 dev_err(dd->dev,
1652 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
1653 err->flush[0], err->flush[1], err->flush[2],
1654 err->flush[3], err->flush[4], err->flush[5]);
1655 dd->cur_msg->status = -EIO;
1656 complete(&dd->transfer_complete);
1657 }
1658}
1659
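/*
 * Size of the single coherent allocation used for DM mode: two
 * spi_dmov_cmd structures (rounded to DM_BYTE_ALIGN) plus two
 * cache-line-aligned padding buffers of burst_size bytes each (TX and
 * RX).
 */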
1660static inline u32 get_chunk_size(struct msm_spi *dd)
1661{
1662 u32 cache_line = dma_get_cache_alignment();
1663
1664 return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
1665 roundup(dd->burst_size, cache_line))*2;
1666}
1667
1668static void msm_spi_teardown_dma(struct msm_spi *dd)
1669{
1670 int limit = 0;
1671
1672 if (!dd->use_dma)
1673 return;
1674
1675 while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001676 msm_dmov_flush(dd->tx_dma_chan, 1);
1677 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001678 msleep(10);
1679 }
1680
1681 dma_free_coherent(NULL, get_chunk_size(dd), dd->tx_dmov_cmd,
1682 dd->tx_dmov_cmd_dma);
1683 dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
1684 dd->tx_padding = dd->rx_padding = NULL;
1685}
1686
1687static __init int msm_spi_init_dma(struct msm_spi *dd)
1688{
1689 dmov_box *box;
1690 u32 cache_line = dma_get_cache_alignment();
1691
1692 /* Allocate all as one chunk, since all is smaller than page size */
1693
 1694 /* We send a NULL device, since it requires coherent_dma_mask in the
 1695 device definition; we're okay with using the system pool */
1696 dd->tx_dmov_cmd = dma_alloc_coherent(NULL, get_chunk_size(dd),
1697 &dd->tx_dmov_cmd_dma, GFP_KERNEL);
1698 if (dd->tx_dmov_cmd == NULL)
1699 return -ENOMEM;
1700
 1701 /* DMA addresses should be 64-bit aligned */
1702 dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
1703 ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
1704 dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
1705 sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);
1706
1707 /* Buffers should be aligned to cache line */
1708 dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
1709 dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
1710 sizeof(struct spi_dmov_cmd), cache_line);
1711 dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding + dd->burst_size),
1712 cache_line);
1713 dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->burst_size,
1714 cache_line);
1715
1716 /* Setup DM commands */
1717 box = &(dd->rx_dmov_cmd->box);
1718 box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
1719 box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
1720 dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
1721 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
1722 offsetof(struct spi_dmov_cmd, cmd_ptr));
1723 dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001724
1725 box = &(dd->tx_dmov_cmd->box);
1726 box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
1727 box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
1728 dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
1729 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
1730 offsetof(struct spi_dmov_cmd, cmd_ptr));
1731 dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001732
1733 dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
1734 CMD_DST_CRCI(dd->tx_dma_crci);
1735 dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
1736 SPI_OUTPUT_FIFO;
1737 dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
1738 CMD_SRC_CRCI(dd->rx_dma_crci);
1739 dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
1740 SPI_INPUT_FIFO;
1741
1742 /* Clear remaining activities on channel */
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001743 msm_dmov_flush(dd->tx_dma_chan, 1);
1744 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001745
1746 return 0;
1747}
1748
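/*
 * Build platform data from the device tree node; currently only the
 * "spi-max-frequency" property is read into max_clock_speed.
 */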
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001749struct msm_spi_platform_data *msm_spi_dt_to_pdata(struct platform_device *pdev)
1750{
1751 struct device_node *node = pdev->dev.of_node;
1752 struct msm_spi_platform_data *pdata;
1753
1754 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1755 if (!pdata) {
1756 pr_err("Unable to allocate platform data\n");
1757 return NULL;
1758 }
1759
1760 of_property_read_u32(node, "spi-max-frequency",
1761 &pdata->max_clock_speed);
1762
1763 return pdata;
1764}
1765
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001766static int __init msm_spi_probe(struct platform_device *pdev)
1767{
1768 struct spi_master *master;
1769 struct msm_spi *dd;
1770 struct resource *resource;
1771 int rc = -ENXIO;
1772 int locked = 0;
1773 int i = 0;
1774 int clk_enabled = 0;
1775 int pclk_enabled = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001776 struct msm_spi_platform_data *pdata;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001777 enum of_gpio_flags flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001778
1779 master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
1780 if (!master) {
1781 rc = -ENOMEM;
1782 dev_err(&pdev->dev, "master allocation failed\n");
1783 goto err_probe_exit;
1784 }
1785
1786 master->bus_num = pdev->id;
1787 master->mode_bits = SPI_SUPPORTED_MODES;
1788 master->num_chipselect = SPI_NUM_CHIPSELECTS;
1789 master->setup = msm_spi_setup;
1790 master->transfer = msm_spi_transfer;
1791 platform_set_drvdata(pdev, master);
1792 dd = spi_master_get_devdata(master);
1793
	if (pdev->dev.of_node) {
		dd->qup_ver = SPI_QUP_VERSION_BFAM;
		master->dev.of_node = pdev->dev.of_node;
		pdata = msm_spi_dt_to_pdata(pdev);
		if (!pdata) {
			rc = -ENOMEM;
			goto err_probe_exit;
		}

		for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
			dd->spi_gpios[i] = of_get_gpio_flags(pdev->dev.of_node,
							i, &flags);
		}

		for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
			dd->cs_gpios[i].gpio_num = of_get_named_gpio_flags(
						pdev->dev.of_node, "cs-gpios",
						i, &flags);
			dd->cs_gpios[i].valid = 0;
		}
	} else {
		pdata = pdev->dev.platform_data;
		dd->qup_ver = SPI_QUP_VERSION_NONE;

		for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
			resource = platform_get_resource(pdev, IORESOURCE_IO,
							i);
			dd->spi_gpios[i] = resource ? resource->start : -1;
		}

		for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
			resource = platform_get_resource(pdev, IORESOURCE_IO,
						i + ARRAY_SIZE(spi_rsrcs));
			dd->cs_gpios[i].gpio_num = resource ?
						resource->start : -1;
			dd->cs_gpios[i].valid = 0;
		}
	}

	dd->pdata = pdata;
	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!resource) {
		rc = -ENXIO;
		goto err_probe_res;
	}

	dd->mem_phys_addr = resource->start;
	dd->mem_size = resource_size(resource);

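	/*
	 * Data-mover support is optional: the first IORESOURCE_DMA entry
	 * carries the Rx/Tx channel numbers in its start/end fields and the
	 * second the matching CRCIs. If the board's dma_config() hook fails,
	 * the driver falls back to FIFO (PIO) mode.
	 */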
	if (pdata) {
		if (pdata->dma_config) {
			rc = pdata->dma_config();
			if (rc) {
				dev_warn(&pdev->dev,
					"%s: DM mode not supported\n",
					__func__);
				dd->use_dma = 0;
				goto skip_dma_resources;
			}
		}
		resource = platform_get_resource(pdev, IORESOURCE_DMA, 0);
		if (resource) {
			dd->rx_dma_chan = resource->start;
			dd->tx_dma_chan = resource->end;
			resource = platform_get_resource(pdev, IORESOURCE_DMA,
							1);
			if (!resource) {
				rc = -ENXIO;
				goto err_probe_res;
			}

			dd->rx_dma_crci = resource->start;
			dd->tx_dma_crci = resource->end;
			dd->use_dma = 1;
			master->dma_alignment = dma_get_cache_alignment();
		}

skip_dma_resources:
		if (pdata->gpio_config) {
			rc = pdata->gpio_config();
			if (rc) {
				dev_err(&pdev->dev,
					"%s: error configuring GPIOs\n",
					__func__);
				goto err_probe_gpio;
			}
		}
	}

	rc = msm_spi_request_gpios(dd);
	if (rc)
		goto err_probe_gpio;

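	/*
	 * Transfer queueing: messages are added to dd->queue under
	 * queue_lock and drained by msm_spi_workq() on a dedicated
	 * single-threaded workqueue; continue_suspend lets suspend wait for
	 * in-flight transfers to finish.
	 */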
	spin_lock_init(&dd->queue_lock);
	mutex_init(&dd->core_lock);
	INIT_LIST_HEAD(&dd->queue);
	INIT_WORK(&dd->work_data, msm_spi_workq);
	init_waitqueue_head(&dd->continue_suspend);
	dd->workqueue = create_singlethread_workqueue(
			dev_name(master->dev.parent));
	if (!dd->workqueue)
		goto err_probe_workq;

	if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
					dd->mem_size, SPI_DRV_NAME)) {
		rc = -ENXIO;
		goto err_probe_reqmem;
	}

	dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
	if (!dd->base) {
		rc = -ENOMEM;
		goto err_probe_reqmem;
	}

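	/*
	 * When a remote spinlock ID is supplied, the QUP is presumably shared
	 * with another execution environment: register access is then
	 * serialized through a remote mutex, and a pm_qos request is
	 * registered so CPU/DMA latency can be bounded while the bus is held.
	 */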
	if (pdata && pdata->rsl_id) {
		struct remote_mutex_id rmid;
		rmid.r_spinlock_id = pdata->rsl_id;
		rmid.delay_us = SPI_TRYLOCK_DELAY;

		rc = remote_mutex_init(&dd->r_lock, &rmid);
		if (rc) {
			dev_err(&pdev->dev, "%s: unable to init remote_mutex "
				"(%s), (rc=%d)\n", __func__,
				rmid.r_spinlock_id, rc);
			goto err_probe_rlock_init;
		}

		dd->use_rlock = 1;
		dd->pm_lat = pdata->pm_lat;
		pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
				   PM_QOS_DEFAULT_VALUE);
	}

	mutex_lock(&dd->core_lock);
	if (dd->use_rlock)
		remote_mutex_lock(&dd->r_lock);

	locked = 1;
	dd->dev = &pdev->dev;
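	/*
	 * "core_clk" feeds the QUP serial engine and "iface_clk" (pclk) the
	 * register interface; both are enabled before any GSBI/QUP register
	 * is touched and dropped again once probe-time setup is complete.
	 */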
	dd->clk = clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(dd->clk)) {
		dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
		rc = PTR_ERR(dd->clk);
		goto err_probe_clk_get;
	}

	dd->pclk = clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(dd->pclk)) {
		dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
		rc = PTR_ERR(dd->pclk);
		goto err_probe_pclk_get;
	}

	if (pdata && pdata->max_clock_speed)
		msm_spi_clock_set(dd, dd->pdata->max_clock_speed);

	rc = clk_prepare_enable(dd->clk);
	if (rc) {
		dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
			__func__);
		goto err_probe_clk_enable;
	}

	clk_enabled = 1;
	rc = clk_prepare_enable(dd->pclk);
	if (rc) {
		dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
			__func__);
		goto err_probe_pclk_enable;
	}

	pclk_enabled = 1;
	rc = msm_spi_configure_gsbi(dd, pdev);
	if (rc)
		goto err_probe_gsbi;

	msm_spi_calculate_fifo_size(dd);
	if (dd->use_dma) {
		rc = msm_spi_init_dma(dd);
		if (rc)
			goto err_probe_dma;
	}

	msm_spi_register_init(dd);
	/*
	 * The SPI core generates a bogus input overrun error on some targets,
	 * when a transition from run to reset state occurs and if the FIFO has
	 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
	 * bit.
	 */
	msm_spi_enable_error_flags(dd);

	writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
	rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	if (rc)
		goto err_probe_state;

	clk_disable_unprepare(dd->clk);
	clk_disable_unprepare(dd->pclk);
	clk_enabled = 0;
	pclk_enabled = 0;

	dd->suspended = 0;
	dd->transfer_pending = 0;
	dd->multi_xfr = 0;
	dd->mode = SPI_MODE_NONE;

	rc = msm_spi_request_irq(dd, pdev, master);
	if (rc)
		goto err_probe_irq;

	msm_spi_disable_irqs(dd);
	if (dd->use_rlock)
		remote_mutex_unlock(&dd->r_lock);

	mutex_unlock(&dd->core_lock);
	locked = 0;

	rc = spi_register_master(master);
	if (rc)
		goto err_probe_reg_master;

	rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
	if (rc) {
		dev_err(&pdev->dev, "failed to create device attributes: %d\n", rc);
		goto err_attrs;
	}

	spi_debugfs_init(dd);
	return 0;

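	/*
	 * Error unwind: the labels below release resources in roughly the
	 * reverse order of acquisition. The clocks are already disabled on
	 * the success path, so the clk_enabled/pclk_enabled flags guard the
	 * clk_disable_unprepare() calls here.
	 */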
err_attrs:
	spi_unregister_master(master);
err_probe_reg_master:
err_probe_irq:
err_probe_state:
	msm_spi_teardown_dma(dd);
err_probe_dma:
err_probe_gsbi:
	if (pclk_enabled)
		clk_disable_unprepare(dd->pclk);
err_probe_pclk_enable:
	if (clk_enabled)
		clk_disable_unprepare(dd->clk);
err_probe_clk_enable:
	clk_put(dd->pclk);
err_probe_pclk_get:
	clk_put(dd->clk);
err_probe_clk_get:
	if (locked) {
		if (dd->use_rlock)
			remote_mutex_unlock(&dd->r_lock);

		mutex_unlock(&dd->core_lock);
	}
err_probe_rlock_init:
err_probe_reqmem:
	destroy_workqueue(dd->workqueue);
err_probe_workq:
	msm_spi_free_gpios(dd);
err_probe_gpio:
	if (pdata && pdata->gpio_release)
		pdata->gpio_release();
err_probe_res:
	spi_master_put(master);
err_probe_exit:
	return rc;
}

#ifdef CONFIG_PM
static int msm_spi_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct msm_spi *dd;
	unsigned long flags;

	if (!master)
		goto suspend_exit;
	dd = spi_master_get_devdata(master);
	if (!dd)
		goto suspend_exit;

	/* Make sure nothing is added to the queue while we're suspending */
	spin_lock_irqsave(&dd->queue_lock, flags);
	dd->suspended = 1;
	spin_unlock_irqrestore(&dd->queue_lock, flags);

	/* Wait for in-flight transactions to end, or for a signal */
	wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending);
	msm_spi_free_gpios(dd);

suspend_exit:
	return 0;
}

static int msm_spi_resume(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct msm_spi *dd;

	if (!master)
		goto resume_exit;
	dd = spi_master_get_devdata(master);
	if (!dd)
		goto resume_exit;

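	/* GPIOs were released in suspend; reclaim them before new transfers */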
	BUG_ON(msm_spi_request_gpios(dd) != 0);
	dd->suspended = 0;
resume_exit:
	return 0;
}
#else
#define msm_spi_suspend NULL
#define msm_spi_resume NULL
#endif /* CONFIG_PM */

static int __devexit msm_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct msm_spi *dd = spi_master_get_devdata(master);
	struct msm_spi_platform_data *pdata = pdev->dev.platform_data;

	pm_qos_remove_request(&qos_req_list);
	spi_debugfs_exit(dd);
	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);

	msm_spi_teardown_dma(dd);
	if (pdata && pdata->gpio_release)
		pdata->gpio_release();

	msm_spi_free_gpios(dd);
	clk_put(dd->clk);
	clk_put(dd->pclk);
	destroy_workqueue(dd->workqueue);
	platform_set_drvdata(pdev, NULL);
	spi_unregister_master(master);
	spi_master_put(master);

	return 0;
}

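/*
 * Device-tree targets bind against "qcom,spi-qup-v2"; legacy targets bind by
 * platform-device name (SPI_DRV_NAME) instead.
 */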
static const struct of_device_id msm_spi_dt_match[] = {
	{
		.compatible = "qcom,spi-qup-v2",
	},
	{}
};

static struct platform_driver msm_spi_driver = {
	.driver = {
		.name = SPI_DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = msm_spi_dt_match,
	},
	.suspend = msm_spi_suspend,
	.resume = msm_spi_resume,
	.remove = __devexit_p(msm_spi_remove),
};

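/*
 * platform_driver_probe() (rather than platform_driver_register()) lets
 * msm_spi_probe() live in __init memory: the probe callback runs only once,
 * at registration time, and the driver is never bound to devices added later.
 */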
static int __init msm_spi_init(void)
{
	return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
}
module_init(msm_spi_init);

static void __exit msm_spi_exit(void)
{
	platform_driver_unregister(&msm_spi_driver);
}
module_exit(msm_spi_exit);

MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.4");
MODULE_ALIAS("platform:"SPI_DRV_NAME);