/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
/*
 * SPI driver for Qualcomm MSM platforms
 *
 */
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <mach/msm_spi.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <mach/dma.h>
#include <asm/atomic.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
#include <linux/remote_spinlock.h>
#include <linux/pm_qos_params.h>
#include <linux/of.h>
#include "spi_qsd.h"

static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
					struct platform_device *pdev)
{
	struct resource *resource;
	unsigned long gsbi_mem_phys_addr;
	size_t gsbi_mem_size;
	void __iomem *gsbi_base;

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!resource)
		return 0;

	gsbi_mem_phys_addr = resource->start;
	gsbi_mem_size = resource_size(resource);
	if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size, SPI_DRV_NAME))
		return -ENXIO;

	gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size);
	if (!gsbi_base)
		return -ENXIO;

	/* Set GSBI to SPI mode */
	writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);

	return 0;
}

static inline void msm_spi_register_init(struct msm_spi *dd)
{
	writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
	writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
	writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
	if (dd->qup_ver)
		writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
}

static inline int msm_spi_request_gpios(struct msm_spi *dd)
{
	int i;
	int result = 0;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0) {
			result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
			if (result) {
				dev_err(dd->dev,
					"%s: gpio_request for pin %d failed with error %d\n",
					__func__, dd->spi_gpios[i], result);
				goto error;
			}
		}
	}
	return 0;

error:
	for (; --i >= 0;) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}
	return result;
}

static inline void msm_spi_free_gpios(struct msm_spi *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}

	for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
		if (dd->cs_gpios[i].valid) {
			gpio_free(dd->cs_gpios[i].gpio_num);
			dd->cs_gpios[i].valid = 0;
		}
	}
}

static void msm_spi_clock_set(struct msm_spi *dd, int speed)
{
	int rc;

	rc = clk_set_rate(dd->clk, speed);
	if (!rc)
		dd->clock_speed = speed;
}

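/*
 * Decode the block-size and FIFO-size fields read from SPI_IO_MODES:
 * 'block' selects the block size in 32-bit words (1, 4 or 8) and 'mult'
 * scales it (x2..x16) to give the FIFO depth in words; *block_size is
 * returned in bytes.
 */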
static int msm_spi_calculate_size(int *fifo_size,
				  int *block_size,
				  int block,
				  int mult)
{
	int words;

	switch (block) {
	case 0:
		words = 1; /* 4 bytes */
		break;
	case 1:
		words = 4; /* 16 bytes */
		break;
	case 2:
		words = 8; /* 32 bytes */
		break;
	default:
		return -EINVAL;
	}

	switch (mult) {
	case 0:
		*fifo_size = words * 2;
		break;
	case 1:
		*fifo_size = words * 4;
		break;
	case 2:
		*fifo_size = words * 8;
		break;
	case 3:
		*fifo_size = words * 16;
		break;
	default:
		return -EINVAL;
	}

	*block_size = words * sizeof(u32); /* in bytes */
	return 0;
}

static void get_next_transfer(struct msm_spi *dd)
{
	struct spi_transfer *t = dd->cur_transfer;

	if (t->transfer_list.next != &dd->cur_msg->transfers) {
		dd->cur_transfer = list_entry(t->transfer_list.next,
					      struct spi_transfer,
					      transfer_list);
		dd->write_buf = dd->cur_transfer->tx_buf;
		dd->read_buf = dd->cur_transfer->rx_buf;
	}
}

static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
{
	u32 spi_iom;
	int block;
	int mult;

	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);

	block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
				   block, mult)) {
		goto fifo_size_err;
	}

	block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->output_fifo_size,
				   &dd->output_block_size, block, mult)) {
		goto fifo_size_err;
	}
	/* DM mode is not available for this block size */
	if (dd->input_block_size == 4 || dd->output_block_size == 4)
		dd->use_dma = 0;

	/* DM mode is currently unsupported for different block sizes */
	if (dd->input_block_size != dd->output_block_size)
		dd->use_dma = 0;

	if (dd->use_dma)
		dd->burst_size = max(dd->input_block_size, DM_BURST_SIZE);

	return;

fifo_size_err:
	dd->use_dma = 0;
	pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
	return;
}

static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
{
	u32 data_in;
	int i;
	int shift;

	data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
	if (dd->read_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			dd->rx_bytes_remaining; i++) {
			/* The data format depends on bytes_per_word:
			   4 bytes: 0x12345678
			   3 bytes: 0x00123456
			   2 bytes: 0x00001234
			   1 byte : 0x00000012
			*/
			shift = 8 * (dd->bytes_per_word - i - 1);
			*dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
			dd->rx_bytes_remaining--;
		}
	} else {
		if (dd->rx_bytes_remaining >= dd->bytes_per_word)
			dd->rx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->rx_bytes_remaining = 0;
	}

	dd->read_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->rx_bytes_remaining)
			dd->read_xfr_cnt = 0;
		else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
						dd->read_len) {
			struct spi_transfer *t = dd->cur_rx_transfer;
			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
					       struct spi_transfer,
					       transfer_list);
				dd->read_buf = t->rx_buf;
				dd->read_len = t->len;
				dd->read_xfr_cnt = 0;
				dd->cur_rx_transfer = t;
			}
		}
	}
}

static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
{
	u32 spi_op = readl_relaxed(dd->base + SPI_STATE);

	return spi_op & SPI_OP_STATE_VALID;
}

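/*
 * Poll SPI_STATE until the VALID bit is set. The poll interval is derived
 * from the SPI clock speed (with SPI_DELAY_THRESHOLD as a lower bound), and
 * the loop gives up after roughly delay * SPI_DEFAULT_TIMEOUT milliseconds.
 */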
static inline int msm_spi_wait_valid(struct msm_spi *dd)
{
	unsigned long delay = 0;
	unsigned long timeout = 0;

	if (dd->clock_speed == 0)
		return -EINVAL;
	/*
	 * Based on the SPI clock speed, sufficient time
	 * should be given for the SPI state transition
	 * to occur
	 */
	delay = (10 * USEC_PER_SEC) / dd->clock_speed;
	/*
	 * For small delay values, the default timeout would
	 * be one jiffy
	 */
	if (delay < SPI_DELAY_THRESHOLD)
		delay = SPI_DELAY_THRESHOLD;

	/* Adding one to round off to the nearest jiffy */
	timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
	while (!msm_spi_is_valid_state(dd)) {
		if (time_after(jiffies, timeout)) {
			if (!msm_spi_is_valid_state(dd)) {
				if (dd->cur_msg)
					dd->cur_msg->status = -EIO;
				dev_err(dd->dev, "%s: SPI operational state not valid\n",
					__func__);
				return -ETIMEDOUT;
			} else
				return 0;
		}
		/*
		 * For smaller values of delay, context switch time
		 * would negate the usage of usleep
		 */
		if (delay > 20)
			usleep(delay);
		else if (delay)
			udelay(delay);
	}
	return 0;
}

static inline int msm_spi_set_state(struct msm_spi *dd,
				    enum msm_spi_state state)
{
	enum msm_spi_state cur_state;
	if (msm_spi_wait_valid(dd))
		return -EIO;
	cur_state = readl_relaxed(dd->base + SPI_STATE);
	/* Per spec:
	   For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
	if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
			(state == SPI_OP_STATE_RESET)) {
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
	} else {
		writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
			       dd->base + SPI_STATE);
	}
	if (msm_spi_wait_valid(dd))
		return -EIO;

	return 0;
}

static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n)
{
	*config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);

	if (n != (*config & SPI_CFG_N))
		*config = (*config & ~SPI_CFG_N) | n;

	if ((dd->mode == SPI_DMOV_MODE) && (!dd->read_len)) {
		if (dd->read_buf == NULL)
			*config |= SPI_NO_INPUT;
		if (dd->write_buf == NULL)
			*config |= SPI_NO_OUTPUT;
	}
}

static void msm_spi_set_config(struct msm_spi *dd, int bpw)
{
	u32 spi_config;

	spi_config = readl_relaxed(dd->base + SPI_CONFIG);

	if (dd->cur_msg->spi->mode & SPI_CPHA)
		spi_config &= ~SPI_CFG_INPUT_FIRST;
	else
		spi_config |= SPI_CFG_INPUT_FIRST;
	if (dd->cur_msg->spi->mode & SPI_LOOP)
		spi_config |= SPI_CFG_LOOPBACK;
	else
		spi_config &= ~SPI_CFG_LOOPBACK;
	msm_spi_add_configs(dd, &spi_config, bpw-1);
	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
	msm_spi_set_qup_config(dd, bpw);
}

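/*
 * Program the Data Mover descriptors for the current chunk: full bursts go
 * through the 'box' commands, any trailing partial burst is bounced through
 * the padding buffers via the 'single_pad' commands, and the expected word
 * counts are written to SPI_MX_OUTPUT_COUNT / SPI_MX_INPUT_COUNT.
 */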
static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
{
	dmov_box *box;
	int bytes_to_send, num_rows, bytes_sent;
	u32 num_transfers;

	atomic_set(&dd->rx_irq_called, 0);
	if (dd->write_len && !dd->read_len) {
		/* WR-WR transfer */
		bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
		dd->write_buf = dd->temp_buf;
	} else {
		bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
		/* For WR-RD transfer, bytes_sent can be negative */
		if (bytes_sent < 0)
			bytes_sent = 0;
	}

	/* We'll send in chunks of SPI_MAX_LEN if larger */
	bytes_to_send = dd->tx_bytes_remaining / SPI_MAX_LEN ?
			SPI_MAX_LEN : dd->tx_bytes_remaining;
	num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
	dd->unaligned_len = bytes_to_send % dd->burst_size;
	num_rows = bytes_to_send / dd->burst_size;

	dd->mode = SPI_DMOV_MODE;

	if (num_rows) {
		/* src in 16 MSB, dst in 16 LSB */
		box = &dd->tx_dmov_cmd->box;
		box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
		box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
		box->num_rows = (num_rows << 16) | num_rows;
		box->row_offset = (dd->burst_size << 16) | 0;

		box = &dd->rx_dmov_cmd->box;
		box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
		box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
		box->num_rows = (num_rows << 16) | num_rows;
		box->row_offset = (0 << 16) | dd->burst_size;

		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, box));
		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, box));
	} else {
		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, single_pad));
		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, single_pad));
	}

	if (!dd->unaligned_len) {
		dd->tx_dmov_cmd->box.cmd |= CMD_LC;
		dd->rx_dmov_cmd->box.cmd |= CMD_LC;
	} else {
		dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
		dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
		u32 offset = dd->cur_transfer->len - dd->unaligned_len;

		if ((dd->multi_xfr) && (dd->read_len <= 0))
			offset = dd->cur_msg_len - dd->unaligned_len;

		dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
		dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;

		memset(dd->tx_padding, 0, dd->burst_size);
		memset(dd->rx_padding, 0, dd->burst_size);
		if (dd->write_buf)
			memcpy(dd->tx_padding, dd->write_buf + offset,
			       dd->unaligned_len);

		tx_cmd->src = dd->tx_padding_dma;
		rx_cmd->dst = dd->rx_padding_dma;
		tx_cmd->len = rx_cmd->len = dd->burst_size;
	}
	/* This also takes care of the padding dummy buf.
	   Since this is set to the correct length, the
	   dummy bytes won't actually be sent. */
	if (dd->multi_xfr) {
		u32 write_transfers = 0;
		u32 read_transfers = 0;

		if (dd->write_len > 0) {
			write_transfers = DIV_ROUND_UP(dd->write_len,
						       dd->bytes_per_word);
			writel_relaxed(write_transfers,
				       dd->base + SPI_MX_OUTPUT_COUNT);
		}
		if (dd->read_len > 0) {
			/*
			 * The read following a write transfer must take
			 * into account that the bytes pertaining to
			 * the write transfer need to be discarded
			 * before the actual read begins.
			 */
			read_transfers = DIV_ROUND_UP(dd->read_len +
						      dd->write_len,
						      dd->bytes_per_word);
			writel_relaxed(read_transfers,
				       dd->base + SPI_MX_INPUT_COUNT);
		}
	} else {
		if (dd->write_buf)
			writel_relaxed(num_transfers,
				       dd->base + SPI_MX_OUTPUT_COUNT);
		if (dd->read_buf)
			writel_relaxed(num_transfers,
				       dd->base + SPI_MX_INPUT_COUNT);
	}
}

static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
{
	dma_coherent_pre_ops();
	if (dd->write_buf)
		msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
	if (dd->read_buf)
		msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
}

/* The SPI core can send a maximum of 4K per transfer, because there is a HW
   problem with infinite mode.
   Therefore, we are sending several chunks of 3K or less (depending on how
   much is left).
   Upon completion we send the next chunk, or complete the transfer if
   everything is finished.
*/
static int msm_spi_dm_send_next(struct msm_spi *dd)
{
	/* By now we should have sent all the bytes in FIFO mode,
	 * However to make things right, we'll check anyway.
	 */
	if (dd->mode != SPI_DMOV_MODE)
		return 0;

	/* We need to send more chunks, if we sent max last time */
	if (dd->tx_bytes_remaining > SPI_MAX_LEN) {
		dd->tx_bytes_remaining -= SPI_MAX_LEN;
		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
			return 0;
		dd->read_len = dd->write_len = 0;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	} else if (dd->read_len && dd->write_len) {
		dd->tx_bytes_remaining -= dd->cur_transfer->len;
		if (list_is_last(&dd->cur_transfer->transfer_list,
				 &dd->cur_msg->transfers))
			return 0;
		get_next_transfer(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
			return 0;
		dd->tx_bytes_remaining = dd->read_len + dd->write_len;
		dd->read_buf = dd->temp_buf;
		dd->read_len = dd->write_len = -1;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	}
	return 0;
}

static inline void msm_spi_ack_transfer(struct msm_spi *dd)
{
	writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
		       SPI_OP_MAX_OUTPUT_DONE_FLAG,
		       dd->base + SPI_OPERATIONAL);
	/* Ensure done flag was cleared before proceeding further */
	mb();
}

/* Figure out which IRQ occurred and call the relevant functions */
static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
{
	u32 op, ret = IRQ_NONE;
	struct msm_spi *dd = dev_id;

	if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
	    readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
		struct spi_master *master = dev_get_drvdata(dd->dev);
		ret |= msm_spi_error_irq(irq, master);
	}

	op = readl_relaxed(dd->base + SPI_OPERATIONAL);
	if (op & SPI_OP_INPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_input_irq(irq, dev_id);
	}

	if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_output_irq(irq, dev_id);
	}

	if (dd->done) {
		complete(&dd->transfer_complete);
		dd->done = 0;
	}
	return ret;
}

static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_rx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
		if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
		    (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
			msm_spi_ack_transfer(dd);
			if (dd->unaligned_len == 0) {
				if (atomic_inc_return(&dd->rx_irq_called) == 1)
					return IRQ_HANDLED;
			}
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	if (dd->mode == SPI_FIFO_MODE) {
		while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
			SPI_OP_IP_FIFO_NOT_EMPTY) &&
			(dd->rx_bytes_remaining > 0)) {
			msm_spi_read_word_from_fifo(dd);
		}
		if (dd->rx_bytes_remaining == 0)
			msm_spi_complete(dd);
	}

	return IRQ_HANDLED;
}

static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
{
	u32 word;
	u8 byte;
	int i;

	word = 0;
	if (dd->write_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			dd->tx_bytes_remaining; i++) {
			dd->tx_bytes_remaining--;
			byte = *dd->write_buf++;
			word |= (byte << (BITS_PER_BYTE * (3 - i)));
		}
	} else
		if (dd->tx_bytes_remaining > dd->bytes_per_word)
			dd->tx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->tx_bytes_remaining = 0;
	dd->write_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->tx_bytes_remaining)
			dd->write_xfr_cnt = 0;
		else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
						dd->write_len) {
			struct spi_transfer *t = dd->cur_tx_transfer;
			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
					       struct spi_transfer,
					       transfer_list);
				dd->write_buf = t->tx_buf;
				dd->write_len = t->len;
				dd->write_xfr_cnt = 0;
				dd->cur_tx_transfer = t;
			}
		}
	}
	writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
}

static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
{
	int count = 0;

	while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
	       !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
		 SPI_OP_OUTPUT_FIFO_FULL)) {
		msm_spi_write_word_to_fifo(dd);
		count++;
	}
}

static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_tx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		/* TX_ONLY transaction is handled here.
		   This is the only place we send complete at tx and not rx */
		if (dd->read_buf == NULL &&
		    readl_relaxed(dd->base + SPI_OPERATIONAL) &
		    SPI_OP_MAX_OUTPUT_DONE_FLAG) {
			msm_spi_ack_transfer(dd);
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	/* Output FIFO is empty. Transmit any outstanding write data. */
	if (dd->mode == SPI_FIFO_MODE)
		msm_spi_write_rmn_to_fifo(dd);

	return IRQ_HANDLED;
}

static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct msm_spi *dd = spi_master_get_devdata(master);
	u32 spi_err;

	spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
	if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output overrun error\n");
	if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI input underrun error\n");
	if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output underrun error\n");
	msm_spi_get_clk_err(dd, &spi_err);
	if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock overrun error\n");
	if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock underrun error\n");
	msm_spi_clear_error_flags(dd);
	msm_spi_ack_clk_err(dd);
	/* Ensure clearing of QUP_ERROR_FLAGS was completed */
	mb();
	return IRQ_HANDLED;
}

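/*
 * Map the client buffers for Data Mover use. For WR-WR and WR-RD messages a
 * single DMA-able temp buffer covering the whole message is allocated and
 * mapped instead of mapping each transfer separately.
 */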
static int msm_spi_map_dma_buffers(struct msm_spi *dd)
{
	struct device *dev;
	struct spi_transfer *first_xfr;
	struct spi_transfer *nxt_xfr = NULL;
	void *tx_buf, *rx_buf;
	unsigned tx_len, rx_len;
	int ret = -EINVAL;

	dev = &dd->cur_msg->spi->dev;
	first_xfr = dd->cur_transfer;
	tx_buf = (void *)first_xfr->tx_buf;
	rx_buf = first_xfr->rx_buf;
	tx_len = rx_len = first_xfr->len;

	/*
	 * For WR-WR and WR-RD transfers, we allocate our own temporary
	 * buffer and copy the data to/from the client buffers.
	 */
	if (dd->multi_xfr) {
		dd->temp_buf = kzalloc(dd->cur_msg_len,
				       GFP_KERNEL | __GFP_DMA);
		if (!dd->temp_buf)
			return -ENOMEM;
		nxt_xfr = list_entry(first_xfr->transfer_list.next,
				     struct spi_transfer, transfer_list);

		if (dd->write_len && !dd->read_len) {
			if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
				goto error;

			memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
			memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
			       nxt_xfr->len);
			tx_buf = dd->temp_buf;
			tx_len = dd->cur_msg_len;
		} else {
			if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
				goto error;

			rx_buf = dd->temp_buf;
			rx_len = dd->cur_msg_len;
		}
	}
	if (tx_buf != NULL) {
		first_xfr->tx_dma = dma_map_single(dev, tx_buf,
						   tx_len, DMA_TO_DEVICE);
		if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'T', tx_len);
			ret = -ENOMEM;
			goto error;
		}
	}
	if (rx_buf != NULL) {
		dma_addr_t dma_handle;
		dma_handle = dma_map_single(dev, rx_buf,
					    rx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(NULL, dma_handle)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'R', rx_len);
			if (tx_buf != NULL)
				dma_unmap_single(NULL, first_xfr->tx_dma,
						 tx_len, DMA_TO_DEVICE);
			ret = -ENOMEM;
			goto error;
		}
		if (dd->multi_xfr)
			nxt_xfr->rx_dma = dma_handle;
		else
			first_xfr->rx_dma = dma_handle;
	}
	return 0;

error:
	kfree(dd->temp_buf);
	dd->temp_buf = NULL;
	return ret;
}

static void msm_spi_unmap_dma_buffers(struct msm_spi *dd)
{
	struct device *dev;
	u32 offset;

	dev = &dd->cur_msg->spi->dev;
	if (dd->cur_msg->is_dma_mapped)
		goto unmap_end;

	if (dd->multi_xfr) {
		if (dd->write_len && !dd->read_len) {
			dma_unmap_single(dev,
					 dd->cur_transfer->tx_dma,
					 dd->cur_msg_len,
					 DMA_TO_DEVICE);
		} else {
			struct spi_transfer *prev_xfr;
			prev_xfr = list_entry(
				   dd->cur_transfer->transfer_list.prev,
				   struct spi_transfer,
				   transfer_list);
			if (dd->cur_transfer->rx_buf) {
				dma_unmap_single(dev,
						 dd->cur_transfer->rx_dma,
						 dd->cur_msg_len,
						 DMA_FROM_DEVICE);
			}
			if (prev_xfr->tx_buf) {
				dma_unmap_single(dev,
						 prev_xfr->tx_dma,
						 prev_xfr->len,
						 DMA_TO_DEVICE);
			}
			if (dd->unaligned_len && dd->read_buf) {
				offset = dd->cur_msg_len - dd->unaligned_len;
				dma_coherent_post_ops();
				memcpy(dd->read_buf + offset, dd->rx_padding,
				       dd->unaligned_len);
				memcpy(dd->cur_transfer->rx_buf,
				       dd->read_buf + prev_xfr->len,
				       dd->cur_transfer->len);
			}
		}
		kfree(dd->temp_buf);
		dd->temp_buf = NULL;
		return;
	} else {
		if (dd->cur_transfer->rx_buf)
			dma_unmap_single(dev, dd->cur_transfer->rx_dma,
					 dd->cur_transfer->len,
					 DMA_FROM_DEVICE);
		if (dd->cur_transfer->tx_buf)
			dma_unmap_single(dev, dd->cur_transfer->tx_dma,
					 dd->cur_transfer->len,
					 DMA_TO_DEVICE);
	}

unmap_end:
	/* If we padded the transfer, we copy it from the padding buf */
	if (dd->unaligned_len && dd->read_buf) {
		offset = dd->cur_transfer->len - dd->unaligned_len;
		dma_coherent_post_ops();
		memcpy(dd->read_buf + offset, dd->rx_padding,
		       dd->unaligned_len);
	}
}

/**
 * msm_use_dm - decides whether to use the data mover for this
 * transfer
 * @dd: device
 * @tr: transfer
 *
 * Start using DM if:
 * 1. The transfer is longer than 3 * block size.
 * 2. The buffers are aligned to the cache line.
 * 3. For WR-RD or WR-WR transfers, conditions (1) and (2) above are met.
 */
static inline int msm_use_dm(struct msm_spi *dd, struct spi_transfer *tr,
			     u8 bpw)
{
	u32 cache_line = dma_get_cache_alignment();

	if (!dd->use_dma)
		return 0;

	if (dd->cur_msg_len < 3*dd->input_block_size)
		return 0;

	if (dd->multi_xfr && !dd->read_len && !dd->write_len)
		return 0;

	if (tr->tx_buf) {
		if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
			return 0;
	}
	if (tr->rx_buf) {
		if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
			return 0;
	}

	if (tr->cs_change &&
	   ((bpw != 8) && (bpw != 16) && (bpw != 32)))
		return 0;
	return 1;
}

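/*
 * Run a single transfer (or combined chunk): pick bits-per-word and clock,
 * choose FIFO or Data Mover mode, program the QUP/SPI registers, kick off
 * the transfer and wait for completion with a timeout scaled to its length.
 */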
static void msm_spi_process_transfer(struct msm_spi *dd)
{
	u8 bpw;
	u32 spi_ioc;
	u32 spi_iom;
	u32 spi_ioc_orig;
	u32 max_speed;
	u32 chip_select;
	u32 read_count;
	u32 timeout;
	u32 int_loopback = 0;

	dd->tx_bytes_remaining = dd->cur_msg_len;
	dd->rx_bytes_remaining = dd->cur_msg_len;
	dd->read_buf = dd->cur_transfer->rx_buf;
	dd->write_buf = dd->cur_transfer->tx_buf;
	init_completion(&dd->transfer_complete);
	if (dd->cur_transfer->bits_per_word)
		bpw = dd->cur_transfer->bits_per_word;
	else
		if (dd->cur_msg->spi->bits_per_word)
			bpw = dd->cur_msg->spi->bits_per_word;
		else
			bpw = 8;
	dd->bytes_per_word = (bpw + 7) / 8;

	if (dd->cur_transfer->speed_hz)
		max_speed = dd->cur_transfer->speed_hz;
	else
		max_speed = dd->cur_msg->spi->max_speed_hz;
	if (!dd->clock_speed || max_speed != dd->clock_speed)
		msm_spi_clock_set(dd, max_speed);

	read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
	if (dd->cur_msg->spi->mode & SPI_LOOP)
		int_loopback = 1;
	if (int_loopback && dd->multi_xfr &&
	    (read_count > dd->input_fifo_size)) {
		if (dd->read_len && dd->write_len)
			pr_err("%s: Internal loopback does not support > fifo size for write-then-read transactions\n",
			       __func__);
		else if (dd->write_len && !dd->read_len)
			pr_err("%s: Internal loopback does not support > fifo size for write-then-write transactions\n",
			       __func__);
		return;
	}
	if (!msm_use_dm(dd, dd->cur_transfer, bpw)) {
		dd->mode = SPI_FIFO_MODE;
		if (dd->multi_xfr) {
			dd->read_len = dd->cur_transfer->len;
			dd->write_len = dd->cur_transfer->len;
		}
		/* read_count cannot exceed fifo_size, and only one READ COUNT
		   interrupt is generated per transaction, so for transactions
		   larger than fifo size READ COUNT must be disabled.
		   For those transactions we usually move to Data Mover mode.
		*/
		if (read_count <= dd->input_fifo_size) {
			writel_relaxed(read_count,
				       dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, read_count);
		} else {
			writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, 0);
		}
	} else {
		dd->mode = SPI_DMOV_MODE;
		if (dd->write_len && dd->read_len) {
			dd->tx_bytes_remaining = dd->write_len;
			dd->rx_bytes_remaining = dd->read_len;
		}
	}

	/* Write mode - fifo or data mover */
	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
	spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
	spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
	spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
	/* Turn on packing for data mover */
	if (dd->mode == SPI_DMOV_MODE)
		spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
	else
		spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
	writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);

	msm_spi_set_config(dd, bpw);

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (dd->cur_msg->spi->mode & SPI_CPOL)
		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
	chip_select = dd->cur_msg->spi->chip_select << 2;
	if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
		spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
	if (!dd->cur_transfer->cs_change)
		spi_ioc |= SPI_IO_C_MX_CS_MODE;
	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);

	if (dd->mode == SPI_DMOV_MODE) {
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
	}
	/* The output fifo interrupt handler will handle all writes after
	   the first. Restricting this to one write avoids contention
	   issues and race conditions between this thread and the int handler
	*/
	else if (dd->mode == SPI_FIFO_MODE) {
		if (msm_spi_prepare_for_write(dd))
			goto transfer_end;
		msm_spi_start_write(dd, read_count);
	}

	/* Only enter the RUN state after the first word is written into
	   the output FIFO. Otherwise, the output FIFO EMPTY interrupt
	   might fire before the first word is written resulting in a
	   possible race condition.
	 */
	if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
		goto transfer_end;

	timeout = 100 * msecs_to_jiffies(
			DIV_ROUND_UP(dd->cur_msg_len * 8,
				DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));

	/* Assume success, this might change later upon transaction result */
	dd->cur_msg->status = 0;
	do {
		if (!wait_for_completion_timeout(&dd->transfer_complete,
						 timeout)) {
			dev_err(dd->dev, "%s: SPI transaction timeout\n",
				__func__);
			dd->cur_msg->status = -EIO;
			if (dd->mode == SPI_DMOV_MODE) {
				msm_dmov_flush(dd->tx_dma_chan, 1);
				msm_dmov_flush(dd->rx_dma_chan, 1);
			}
			break;
		}
	} while (msm_spi_dm_send_next(dd));

transfer_end:
	if (dd->mode == SPI_DMOV_MODE)
		msm_spi_unmap_dma_buffers(dd);
	dd->mode = SPI_MODE_NONE;

	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
		       dd->base + SPI_IO_CONTROL);
}

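/*
 * Scan the message to compute the total length; for two-transfer WR-WR and
 * WR-RD messages also record dd->read_len and dd->write_len.
 */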
static void get_transfer_length(struct msm_spi *dd)
{
	struct spi_transfer *tr;
	int num_xfrs = 0;
	int readlen = 0;
	int writelen = 0;

	dd->cur_msg_len = 0;
	dd->multi_xfr = 0;
	dd->read_len = dd->write_len = 0;

	list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
		if (tr->tx_buf)
			writelen += tr->len;
		if (tr->rx_buf)
			readlen += tr->len;
		dd->cur_msg_len += tr->len;
		num_xfrs++;
	}

	if (num_xfrs == 2) {
		struct spi_transfer *first_xfr = dd->cur_transfer;

		dd->multi_xfr = 1;
		tr = list_entry(first_xfr->transfer_list.next,
				struct spi_transfer,
				transfer_list);
		/*
		 * We update dd->read_len and dd->write_len only
		 * for WR-WR and WR-RD transfers.
		 */
		if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
			if (((tr->tx_buf) && (!tr->rx_buf)) ||
			    ((!tr->tx_buf) && (tr->rx_buf))) {
				dd->read_len = readlen;
				dd->write_len = writelen;
			}
		}
	} else if (num_xfrs > 1)
		dd->multi_xfr = 1;
}

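/*
 * Group consecutive transfers that share the same cs_change setting into one
 * chunk; returns the number of transfers combined and leaves the combined
 * byte count in dd->cur_msg_len.
 */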
static inline int combine_transfers(struct msm_spi *dd)
{
	struct spi_transfer *t = dd->cur_transfer;
	struct spi_transfer *nxt;
	int xfrs_grped = 1;

	dd->cur_msg_len = dd->cur_transfer->len;
	while (t->transfer_list.next != &dd->cur_msg->transfers) {
		nxt = list_entry(t->transfer_list.next,
				 struct spi_transfer,
				 transfer_list);
		if (t->cs_change != nxt->cs_change)
			return xfrs_grped;
		dd->cur_msg_len += nxt->len;
		xfrs_grped++;
		t = nxt;
	}
	return xfrs_grped;
}

static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
{
	u32 spi_ioc;
	u32 spi_ioc_orig;

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (set_flag)
		spi_ioc |= SPI_IO_C_FORCE_CS;
	else
		spi_ioc &= ~SPI_IO_C_FORCE_CS;

	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
}

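/*
 * Dispatch one spi_message: claim the chip-select GPIO if needed, then on
 * QUP v2 controllers walk the transfer list, forcing CS between transfers
 * that share the same cs_change setting; otherwise group transfers and hand
 * them to msm_spi_process_transfer().
 */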
static void msm_spi_process_message(struct msm_spi *dd)
{
	int xfrs_grped = 0;
	int cs_num;
	int rc;

	dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
	cs_num = dd->cur_msg->spi->chip_select;
	if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
	    (!(dd->cs_gpios[cs_num].valid)) &&
	    (dd->cs_gpios[cs_num].gpio_num >= 0)) {
		rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
				  spi_cs_rsrcs[cs_num]);
		if (rc) {
			dev_err(dd->dev,
				"gpio_request for pin %d failed with error %d\n",
				dd->cs_gpios[cs_num].gpio_num, rc);
			return;
		}
		dd->cs_gpios[cs_num].valid = 1;
	}

	if (dd->qup_ver) {
		write_force_cs(dd, 0);
		list_for_each_entry(dd->cur_transfer,
				    &dd->cur_msg->transfers,
				    transfer_list) {
			struct spi_transfer *t = dd->cur_transfer;
			struct spi_transfer *nxt;

			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				nxt = list_entry(t->transfer_list.next,
						 struct spi_transfer,
						 transfer_list);

				if (t->cs_change == nxt->cs_change)
					write_force_cs(dd, 1);
				else
					write_force_cs(dd, 0);
			}

			dd->cur_msg_len = dd->cur_transfer->len;
			msm_spi_process_transfer(dd);
		}
	} else {
		dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
						    struct spi_transfer,
						    transfer_list);
		get_transfer_length(dd);
		if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
			/*
			 * Handling of multi-transfers.
			 * FIFO mode is used by default
			 */
			list_for_each_entry(dd->cur_transfer,
					    &dd->cur_msg->transfers,
					    transfer_list) {
				if (!dd->cur_transfer->len)
					goto error;
				if (xfrs_grped) {
					xfrs_grped--;
					continue;
				} else {
					dd->read_len = dd->write_len = 0;
					xfrs_grped = combine_transfers(dd);
				}

				dd->cur_tx_transfer = dd->cur_transfer;
				dd->cur_rx_transfer = dd->cur_transfer;
				msm_spi_process_transfer(dd);
				xfrs_grped--;
			}
		} else {
			/* Handling of a single transfer or
			 * WR-WR or WR-RD transfers
			 */
			if ((!dd->cur_msg->is_dma_mapped) &&
			    (msm_use_dm(dd, dd->cur_transfer,
					dd->cur_transfer->bits_per_word))) {
				/* Mapping of DMA buffers */
				int ret = msm_spi_map_dma_buffers(dd);
				if (ret < 0) {
					dd->cur_msg->status = ret;
					goto error;
				}
			}

			dd->cur_tx_transfer = dd->cur_transfer;
			dd->cur_rx_transfer = dd->cur_transfer;
			msm_spi_process_transfer(dd);
		}
	}

	return;

error:
	if (dd->cs_gpios[cs_num].valid) {
		gpio_free(dd->cs_gpios[cs_num].gpio_num);
		dd->cs_gpios[cs_num].valid = 0;
	}
}

/* workqueue - pull messages from queue & process */
static void msm_spi_workq(struct work_struct *work)
{
	struct msm_spi *dd =
		container_of(work, struct msm_spi, work_data);
	unsigned long flags;
	u32 status_error = 0;

	mutex_lock(&dd->core_lock);

	/* Don't allow power collapse until we release mutex */
	if (pm_qos_request_active(&qos_req_list))
		pm_qos_update_request(&qos_req_list,
				      dd->pm_lat);
	if (dd->use_rlock)
		remote_mutex_lock(&dd->r_lock);

	clk_prepare_enable(dd->clk);
	clk_prepare_enable(dd->pclk);
	msm_spi_enable_irqs(dd);

	if (!msm_spi_is_valid_state(dd)) {
		dev_err(dd->dev, "%s: SPI operational state not valid\n",
			__func__);
		status_error = 1;
	}

	spin_lock_irqsave(&dd->queue_lock, flags);
	while (!list_empty(&dd->queue)) {
		dd->cur_msg = list_entry(dd->queue.next,
					 struct spi_message, queue);
		list_del_init(&dd->cur_msg->queue);
		spin_unlock_irqrestore(&dd->queue_lock, flags);
		if (status_error)
			dd->cur_msg->status = -EIO;
		else
			msm_spi_process_message(dd);
		if (dd->cur_msg->complete)
			dd->cur_msg->complete(dd->cur_msg->context);
		spin_lock_irqsave(&dd->queue_lock, flags);
	}
	dd->transfer_pending = 0;
	spin_unlock_irqrestore(&dd->queue_lock, flags);

	msm_spi_disable_irqs(dd);
	clk_disable_unprepare(dd->clk);
	clk_disable_unprepare(dd->pclk);

	if (dd->use_rlock)
		remote_mutex_unlock(&dd->r_lock);

	if (pm_qos_request_active(&qos_req_list))
		pm_qos_update_request(&qos_req_list,
				      PM_QOS_DEFAULT_VALUE);

	mutex_unlock(&dd->core_lock);
	/* If needed, this can be done after the current message is complete,
	   and work can be continued upon resume. No motivation for now. */
	if (dd->suspended)
		wake_up_interruptible(&dd->continue_suspend);
}

static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct msm_spi *dd;
	unsigned long flags;
	struct spi_transfer *tr;

	dd = spi_master_get_devdata(spi->master);
	if (dd->suspended)
		return -EBUSY;

	if (list_empty(&msg->transfers) || !msg->complete)
		return -EINVAL;

	list_for_each_entry(tr, &msg->transfers, transfer_list) {
		/* Check message parameters */
		if (tr->speed_hz > dd->pdata->max_clock_speed ||
		    (tr->bits_per_word &&
		     (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
		    (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
			dev_err(&spi->dev,
				"Invalid transfer: %d Hz, %d bpw, tx=%p, rx=%p\n",
				tr->speed_hz, tr->bits_per_word,
				tr->tx_buf, tr->rx_buf);
			return -EINVAL;
		}
	}

	spin_lock_irqsave(&dd->queue_lock, flags);
	if (dd->suspended) {
		spin_unlock_irqrestore(&dd->queue_lock, flags);
		return -EBUSY;
	}
	dd->transfer_pending = 1;
	list_add_tail(&msg->queue, &dd->queue);
	spin_unlock_irqrestore(&dd->queue_lock, flags);
	queue_work(dd->workqueue, &dd->work_data);
	return 0;
}

static int msm_spi_setup(struct spi_device *spi)
{
	struct msm_spi *dd;
	int rc = 0;
	u32 spi_ioc;
	u32 spi_config;
	u32 mask;

	if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
		dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
			__func__, spi->bits_per_word);
		rc = -EINVAL;
	}
	if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
		dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
			__func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
		rc = -EINVAL;
	}

	if (rc)
		goto err_setup_exit;

	dd = spi_master_get_devdata(spi->master);

	mutex_lock(&dd->core_lock);
	if (dd->suspended) {
		mutex_unlock(&dd->core_lock);
		return -EBUSY;
	}

	if (dd->use_rlock)
		remote_mutex_lock(&dd->r_lock);

	clk_prepare_enable(dd->clk);
	clk_prepare_enable(dd->pclk);

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
	if (spi->mode & SPI_CS_HIGH)
		spi_ioc |= mask;
	else
		spi_ioc &= ~mask;
	if (spi->mode & SPI_CPOL)
		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;

	writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);

	spi_config = readl_relaxed(dd->base + SPI_CONFIG);
	if (spi->mode & SPI_LOOP)
		spi_config |= SPI_CFG_LOOPBACK;
	else
		spi_config &= ~SPI_CFG_LOOPBACK;
	if (spi->mode & SPI_CPHA)
		spi_config &= ~SPI_CFG_INPUT_FIRST;
	else
		spi_config |= SPI_CFG_INPUT_FIRST;
	writel_relaxed(spi_config, dd->base + SPI_CONFIG);

	/* Ensure previous write completed before disabling the clocks */
	mb();
	clk_disable_unprepare(dd->clk);
	clk_disable_unprepare(dd->pclk);

	if (dd->use_rlock)
		remote_mutex_unlock(&dd->r_lock);
	mutex_unlock(&dd->core_lock);

err_setup_exit:
	return rc;
}

#ifdef CONFIG_DEBUG_FS
static int debugfs_iomem_x32_set(void *data, u64 val)
{
	writel_relaxed(val, data);
	/* Ensure the previous write completed. */
	mb();
	return 0;
}

static int debugfs_iomem_x32_get(void *data, u64 *val)
{
	*val = readl_relaxed(data);
	/* Ensure the previous read completed. */
	mb();
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
			debugfs_iomem_x32_set, "0x%08llx\n");

static void spi_debugfs_init(struct msm_spi *dd)
{
	dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
	if (dd->dent_spi) {
		int i;

		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
			dd->debugfs_spi_regs[i] =
			   debugfs_create_file(
			       debugfs_spi_regs[i].name,
			       debugfs_spi_regs[i].mode,
			       dd->dent_spi,
			       dd->base + debugfs_spi_regs[i].offset,
			       &fops_iomem_x32);
		}
	}
}

static void spi_debugfs_exit(struct msm_spi *dd)
{
	if (dd->dent_spi) {
		int i;

		debugfs_remove_recursive(dd->dent_spi);
		dd->dent_spi = NULL;
		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
			dd->debugfs_spi_regs[i] = NULL;
	}
}
#else
static void spi_debugfs_init(struct msm_spi *dd) {}
static void spi_debugfs_exit(struct msm_spi *dd) {}
#endif

/* ===Device attributes begin=== */
static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct msm_spi *dd = spi_master_get_devdata(master);

	return snprintf(buf, PAGE_SIZE,
			"Device %s\n"
			"rx fifo_size = %d spi words\n"
			"tx fifo_size = %d spi words\n"
			"use_dma ? %s\n"
			"rx block size = %d bytes\n"
			"tx block size = %d bytes\n"
			"burst size = %d bytes\n"
			"DMA configuration:\n"
			"tx_ch=%d, rx_ch=%d, tx_crci=%d, rx_crci=%d\n"
			"--statistics--\n"
			"Rx isrs = %d\n"
			"Tx isrs = %d\n"
			"DMA error = %d\n"
			"--debug--\n"
			"NA yet\n",
			dev_name(dev),
			dd->input_fifo_size,
			dd->output_fifo_size,
			dd->use_dma ? "yes" : "no",
			dd->input_block_size,
			dd->output_block_size,
			dd->burst_size,
			dd->tx_dma_chan,
			dd->rx_dma_chan,
			dd->tx_dma_crci,
			dd->rx_dma_crci,
			dd->stat_rx + dd->stat_dmov_rx,
			dd->stat_tx + dd->stat_dmov_tx,
			dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
			);
}

/* Reset statistics on write */
static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct msm_spi *dd = dev_get_drvdata(dev);
	dd->stat_rx = 0;
	dd->stat_tx = 0;
	dd->stat_dmov_rx = 0;
	dd->stat_dmov_tx = 0;
	dd->stat_dmov_rx_err = 0;
	dd->stat_dmov_tx_err = 0;
	return count;
}

static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);

static struct attribute *dev_attrs[] = {
	&dev_attr_stats.attr,
	NULL,
};

static struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
/* ===Device attributes end=== */

/**
 * spi_dmov_tx_complete_func - DataMover tx completion callback
 *
 * Executed in IRQ context (Data Mover's IRQ) with DataMover's
 * spinlock @msm_dmov_lock held.
 */
static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
				      unsigned int result,
				      struct msm_dmov_errdata *err)
{
	struct msm_spi *dd;

	if (!(result & DMOV_RSLT_VALID)) {
		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
		return;
	}
	/* restore original context */
	dd = container_of(cmd, struct msm_spi, tx_hdr);
	if (result & DMOV_RSLT_DONE)
		dd->stat_dmov_tx++;
	else {
		/* Error or flush */
		if (result & DMOV_RSLT_ERROR) {
			dev_err(dd->dev, "DMA error (0x%08x)\n", result);
			dd->stat_dmov_tx_err++;
		}
		if (result & DMOV_RSLT_FLUSH) {
			/*
			 * Flushing normally happens in process of
			 * removing, when we are waiting for outstanding
			 * DMA commands to be flushed.
			 */
			dev_info(dd->dev,
				 "DMA channel flushed (0x%08x)\n", result);
		}
		if (err)
			dev_err(dd->dev,
				"Flush data(%08x %08x %08x %08x %08x %08x)\n",
				err->flush[0], err->flush[1], err->flush[2],
				err->flush[3], err->flush[4], err->flush[5]);
		dd->cur_msg->status = -EIO;
		complete(&dd->transfer_complete);
	}
}

/**
 * spi_dmov_rx_complete_func - DataMover rx completion callback
 *
 * Executed in IRQ context (Data Mover's IRQ) with
 * DataMover's spinlock @msm_dmov_lock held.
 */
static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
				      unsigned int result,
				      struct msm_dmov_errdata *err)
{
	struct msm_spi *dd;

	if (!(result & DMOV_RSLT_VALID)) {
		pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)",
		       result, cmd);
		return;
	}
	/* restore original context */
	dd = container_of(cmd, struct msm_spi, rx_hdr);
	if (result & DMOV_RSLT_DONE) {
		dd->stat_dmov_rx++;
		if (atomic_inc_return(&dd->rx_irq_called) == 1)
			return;
		complete(&dd->transfer_complete);
	} else {
		/* Error or flush */
		if (result & DMOV_RSLT_ERROR) {
			dev_err(dd->dev, "DMA error(0x%08x)\n", result);
			dd->stat_dmov_rx_err++;
		}
		if (result & DMOV_RSLT_FLUSH) {
			dev_info(dd->dev,
				 "DMA channel flushed(0x%08x)\n", result);
		}
		if (err)
			dev_err(dd->dev,
				"Flush data(%08x %08x %08x %08x %08x %08x)\n",
				err->flush[0], err->flush[1], err->flush[2],
				err->flush[3], err->flush[4], err->flush[5]);
		dd->cur_msg->status = -EIO;
		complete(&dd->transfer_complete);
	}
}

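/*
 * Size of the single coherent allocation used for DMA bookkeeping: the tx
 * and rx spi_dmov_cmd structures plus the two cache-line aligned padding
 * buffers.
 */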
static inline u32 get_chunk_size(struct msm_spi *dd)
{
	u32 cache_line = dma_get_cache_alignment();

	return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
		roundup(dd->burst_size, cache_line))*2;
}

static void msm_spi_teardown_dma(struct msm_spi *dd)
{
	int limit = 0;

	if (!dd->use_dma)
		return;

	while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
		msm_dmov_flush(dd->tx_dma_chan, 1);
		msm_dmov_flush(dd->rx_dma_chan, 1);
		msleep(10);
	}

	dma_free_coherent(NULL, get_chunk_size(dd), dd->tx_dmov_cmd,
			  dd->tx_dmov_cmd_dma);
	dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
	dd->tx_padding = dd->rx_padding = NULL;
}

static __init int msm_spi_init_dma(struct msm_spi *dd)
{
	dmov_box *box;
	u32 cache_line = dma_get_cache_alignment();

	/* Allocate all as one chunk, since all is smaller than page size */

	/* We pass a NULL device, since using the real device would require a
	   coherent_dma_mask in its definition; we're okay with the system pool */
	dd->tx_dmov_cmd = dma_alloc_coherent(NULL, get_chunk_size(dd),
					     &dd->tx_dmov_cmd_dma, GFP_KERNEL);
	if (dd->tx_dmov_cmd == NULL)
		return -ENOMEM;

	/* DMA addresses should be 64-bit aligned */
	dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
			  ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
	dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
			      sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);

	/* Buffers should be aligned to cache line */
	dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
	dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
			      sizeof(struct spi_dmov_cmd), cache_line);
	dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding + dd->burst_size),
				     cache_line);
	dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->burst_size,
				   cache_line);

	/* Setup DM commands */
	box = &(dd->rx_dmov_cmd->box);
	box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
	box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
	dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
			    DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
			    offsetof(struct spi_dmov_cmd, cmd_ptr));
	dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;

	box = &(dd->tx_dmov_cmd->box);
	box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
	box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
	dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
			    DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
			    offsetof(struct spi_dmov_cmd, cmd_ptr));
	dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;

	dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
					  CMD_DST_CRCI(dd->tx_dma_crci);
	dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
					  SPI_OUTPUT_FIFO;
	dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
					  CMD_SRC_CRCI(dd->rx_dma_crci);
	dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
					  SPI_INPUT_FIFO;

	/* Clear remaining activities on channel */
	msm_dmov_flush(dd->tx_dma_chan, 1);
	msm_dmov_flush(dd->rx_dma_chan, 1);

	return 0;
}

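/*
 * Build platform data from the device tree node; currently only the
 * "spi-max-frequency" property is read into max_clock_speed.
 */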
struct msm_spi_platform_data *msm_spi_dt_to_pdata(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct msm_spi_platform_data *pdata;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		pr_err("Unable to allocate platform data\n");
		return NULL;
	}

	of_property_read_u32(node, "spi-max-frequency",
			     &pdata->max_clock_speed);

	return pdata;
}

Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001765static int __init msm_spi_probe(struct platform_device *pdev)
1766{
1767 struct spi_master *master;
1768 struct msm_spi *dd;
1769 struct resource *resource;
1770 int rc = -ENXIO;
1771 int locked = 0;
1772 int i = 0;
1773 int clk_enabled = 0;
1774 int pclk_enabled = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001775 struct msm_spi_platform_data *pdata;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001776
1777 master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
1778 if (!master) {
1779 rc = -ENOMEM;
1780 dev_err(&pdev->dev, "master allocation failed\n");
1781 goto err_probe_exit;
1782 }
1783
1784 master->bus_num = pdev->id;
1785 master->mode_bits = SPI_SUPPORTED_MODES;
1786 master->num_chipselect = SPI_NUM_CHIPSELECTS;
1787 master->setup = msm_spi_setup;
1788 master->transfer = msm_spi_transfer;
1789 platform_set_drvdata(pdev, master);
1790 dd = spi_master_get_devdata(master);
1791
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001792 if (pdev->dev.of_node) {
1793 dd->qup_ver = SPI_QUP_VERSION_BFAM;
1794 master->dev.of_node = pdev->dev.of_node;
1795 pdata = msm_spi_dt_to_pdata(pdev);
1796 if (!pdata) {
1797 rc = -ENOMEM;
1798 goto err_probe_exit;
1799 }
1800 } else {
1801 pdata = pdev->dev.platform_data;
1802 dd->qup_ver = SPI_QUP_VERSION_NONE;
1803 }
1804
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001805 dd->pdata = pdata;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001806 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001807 if (!resource) {
1808 rc = -ENXIO;
1809 goto err_probe_res;
1810 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001811
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001812 dd->mem_phys_addr = resource->start;
1813 dd->mem_size = resource_size(resource);
1814
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001815 if (pdata) {
1816 if (pdata->dma_config) {
1817 rc = pdata->dma_config();
1818 if (rc) {
1819 dev_warn(&pdev->dev,
1820 "%s: DM mode not supported\n",
1821 __func__);
1822 dd->use_dma = 0;
1823 goto skip_dma_resources;
1824 }
1825 }
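		/*
		 * DMA resources are optional: IORESOURCE_DMA entry 0 carries
		 * the RX channel in .start and the TX channel in .end; entry 1
		 * carries the RX and TX CRCIs the same way. Without entry 0
		 * the driver runs without data mover support (use_dma stays 0).
		 */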
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001826 resource = platform_get_resource(pdev, IORESOURCE_DMA, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001827 if (resource) {
1828 dd->rx_dma_chan = resource->start;
1829 dd->tx_dma_chan = resource->end;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001830 resource = platform_get_resource(pdev, IORESOURCE_DMA,
1831 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001832 if (!resource) {
1833 rc = -ENXIO;
1834 goto err_probe_res;
1835 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001836
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001837 dd->rx_dma_crci = resource->start;
1838 dd->tx_dma_crci = resource->end;
1839 dd->use_dma = 1;
1840 master->dma_alignment = dma_get_cache_alignment();
1841 }
1842
1843skip_dma_resources:
1844 if (pdata->gpio_config) {
1845 rc = pdata->gpio_config();
1846 if (rc) {
1847 dev_err(&pdev->dev,
1848 "%s: error configuring GPIOs\n",
1849 __func__);
1850 goto err_probe_gpio;
1851 }
1852 }
1853 }
1854
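	/*
	 * SPI signal GPIOs are passed as optional IORESOURCE_IO entries: the
	 * core signals (spi_rsrcs) come first, followed by one entry per
	 * chip select (spi_cs_rsrcs). A missing entry is recorded as -1.
	 */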
1855 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001856 resource = platform_get_resource(pdev, IORESOURCE_IO, i);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001857 dd->spi_gpios[i] = resource ? resource->start : -1;
1858 }
1859
Harini Jayaramane4c06192011-09-28 16:26:39 -06001860 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001861 resource = platform_get_resource(pdev, IORESOURCE_IO,
1862 i + ARRAY_SIZE(spi_rsrcs));
Harini Jayaramane4c06192011-09-28 16:26:39 -06001863 dd->cs_gpios[i].gpio_num = resource ? resource->start : -1;
1864 dd->cs_gpios[i].valid = 0;
1865 }
1866
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001867 rc = msm_spi_request_gpios(dd);
1868 if (rc)
1869 goto err_probe_gpio;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001870
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001871 spin_lock_init(&dd->queue_lock);
1872 mutex_init(&dd->core_lock);
1873 INIT_LIST_HEAD(&dd->queue);
1874 INIT_WORK(&dd->work_data, msm_spi_workq);
1875 init_waitqueue_head(&dd->continue_suspend);
1876 dd->workqueue = create_singlethread_workqueue(
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001877 dev_name(master->dev.parent));
	if (!dd->workqueue) {
		rc = -ENOMEM;
		goto err_probe_workq;
	}

1880
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001881 if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
1882 dd->mem_size, SPI_DRV_NAME)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001883 rc = -ENXIO;
1884 goto err_probe_reqmem;
1885 }
1886
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001887 dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
1888 if (!dd->base) {
1889 rc = -ENOMEM;
1890 goto err_probe_reqmem;
1891 }
1892
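	/*
	 * When the controller is shared with a remote processor
	 * (pdata->rsl_id is set), a remote mutex arbitrates access to the
	 * hardware; a pm_qos CPU/DMA latency request is added and
	 * pdata->pm_lat is retained in the driver data.
	 */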
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001893 if (pdata && pdata->rsl_id) {
1894 struct remote_mutex_id rmid;
1895 rmid.r_spinlock_id = pdata->rsl_id;
1896 rmid.delay_us = SPI_TRYLOCK_DELAY;
1897
1898 rc = remote_mutex_init(&dd->r_lock, &rmid);
1899 if (rc) {
1900 dev_err(&pdev->dev, "%s: unable to init remote_mutex "
				"(%s), (rc=%d)\n", __func__,
				rmid.r_spinlock_id, rc);
1903 goto err_probe_rlock_init;
1904 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001905
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001906 dd->use_rlock = 1;
1907 dd->pm_lat = pdata->pm_lat;
1908 pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
1909 PM_QOS_DEFAULT_VALUE);
1910 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001911
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001912 mutex_lock(&dd->core_lock);
1913 if (dd->use_rlock)
1914 remote_mutex_lock(&dd->r_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001915
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001916 locked = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001917 dd->dev = &pdev->dev;
Matt Wagantallac294852011-08-17 15:44:58 -07001918 dd->clk = clk_get(&pdev->dev, "core_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001919 if (IS_ERR(dd->clk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07001920 dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001921 rc = PTR_ERR(dd->clk);
1922 goto err_probe_clk_get;
1923 }
1924
Matt Wagantallac294852011-08-17 15:44:58 -07001925 dd->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001926 if (IS_ERR(dd->pclk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07001927 dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001928 rc = PTR_ERR(dd->pclk);
1929 goto err_probe_pclk_get;
1930 }
1931
1932 if (pdata && pdata->max_clock_speed)
1933 msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
1934
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001935 rc = clk_prepare_enable(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001936 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07001937 dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001938 __func__);
1939 goto err_probe_clk_enable;
1940 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001941
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001942 clk_enabled = 1;
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001943 rc = clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001944 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07001945 dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001946 __func__);
1947 goto err_probe_pclk_enable;
1948 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001949
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001950 pclk_enabled = 1;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001951 rc = msm_spi_configure_gsbi(dd, pdev);
1952 if (rc)
1953 goto err_probe_gsbi;
1954
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001955 msm_spi_calculate_fifo_size(dd);
1956 if (dd->use_dma) {
1957 rc = msm_spi_init_dma(dd);
1958 if (rc)
1959 goto err_probe_dma;
1960 }
1961
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001962 msm_spi_register_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001963 /*
1964 * The SPI core generates a bogus input overrun error on some targets,
1965 * when a transition from run to reset state occurs and if the FIFO has
1966 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
1967 * bit.
1968 */
1969 msm_spi_enable_error_flags(dd);
1970
1971 writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
1972 rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
1973 if (rc)
1974 goto err_probe_state;
1975
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001976 clk_disable_unprepare(dd->clk);
1977 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001978 clk_enabled = 0;
1979 pclk_enabled = 0;
1980
1981 dd->suspended = 0;
1982 dd->transfer_pending = 0;
1983 dd->multi_xfr = 0;
1984 dd->mode = SPI_MODE_NONE;
1985
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001986 rc = msm_spi_request_irq(dd, pdev, master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001987 if (rc)
1988 goto err_probe_irq;
1989
1990 msm_spi_disable_irqs(dd);
1991 if (dd->use_rlock)
1992 remote_mutex_unlock(&dd->r_lock);
1993
1994 mutex_unlock(&dd->core_lock);
1995 locked = 0;
1996
1997 rc = spi_register_master(master);
1998 if (rc)
1999 goto err_probe_reg_master;
2000
2001 rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
2002 if (rc) {
2003 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
2004 goto err_attrs;
2005 }
2006
2007 spi_debugfs_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002008 return 0;
2009
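	/*
	 * Error unwind: the labels below run in reverse order of acquisition.
	 * Memory regions, ioremaps and the platform data allocated with
	 * devm_* are released automatically by the driver core.
	 */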
2010err_attrs:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002011 spi_unregister_master(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002012err_probe_reg_master:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002013err_probe_irq:
2014err_probe_state:
2015 msm_spi_teardown_dma(dd);
2016err_probe_dma:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002017err_probe_gsbi:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002018 if (pclk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002019 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002020err_probe_pclk_enable:
2021 if (clk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002022 clk_disable_unprepare(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002023err_probe_clk_enable:
2024 clk_put(dd->pclk);
2025err_probe_pclk_get:
2026 clk_put(dd->clk);
2027err_probe_clk_get:
2028 if (locked) {
2029 if (dd->use_rlock)
2030 remote_mutex_unlock(&dd->r_lock);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002031
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002032 mutex_unlock(&dd->core_lock);
2033 }
2034err_probe_rlock_init:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002035err_probe_reqmem:
2036 destroy_workqueue(dd->workqueue);
2037err_probe_workq:
2038 msm_spi_free_gpios(dd);
2039err_probe_gpio:
2040 if (pdata && pdata->gpio_release)
2041 pdata->gpio_release();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002042err_probe_res:
2043 spi_master_put(master);
2044err_probe_exit:
2045 return rc;
2046}
2047
2048#ifdef CONFIG_PM
2049static int msm_spi_suspend(struct platform_device *pdev, pm_message_t state)
2050{
2051 struct spi_master *master = platform_get_drvdata(pdev);
2052 struct msm_spi *dd;
2053 unsigned long flags;
2054
2055 if (!master)
2056 goto suspend_exit;
2057 dd = spi_master_get_devdata(master);
2058 if (!dd)
2059 goto suspend_exit;
2060
2061 /* Make sure nothing is added to the queue while we're suspending */
2062 spin_lock_irqsave(&dd->queue_lock, flags);
2063 dd->suspended = 1;
2064 spin_unlock_irqrestore(&dd->queue_lock, flags);
2065
2066 /* Wait for transactions to end, or time out */
2067 wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending);
2068 msm_spi_free_gpios(dd);
2069
2070suspend_exit:
2071 return 0;
2072}
2073
2074static int msm_spi_resume(struct platform_device *pdev)
2075{
2076 struct spi_master *master = platform_get_drvdata(pdev);
2077 struct msm_spi *dd;
2078
2079 if (!master)
2080 goto resume_exit;
2081 dd = spi_master_get_devdata(master);
2082 if (!dd)
2083 goto resume_exit;
2084
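	/*
	 * Suspend released the GPIOs; reclaim them before clearing the
	 * suspended flag so new transfers find the pins configured.
	 */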
2085 BUG_ON(msm_spi_request_gpios(dd) != 0);
2086 dd->suspended = 0;
2087resume_exit:
2088 return 0;
2089}
2090#else
2091#define msm_spi_suspend NULL
2092#define msm_spi_resume NULL
2093#endif /* CONFIG_PM */
2094
2095static int __devexit msm_spi_remove(struct platform_device *pdev)
2096{
2097 struct spi_master *master = platform_get_drvdata(pdev);
2098 struct msm_spi *dd = spi_master_get_devdata(master);
2099 struct msm_spi_platform_data *pdata = pdev->dev.platform_data;
2100
2101 pm_qos_remove_request(&qos_req_list);
2102 spi_debugfs_exit(dd);
2103 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
2104
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002105 msm_spi_teardown_dma(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002106 if (pdata && pdata->gpio_release)
2107 pdata->gpio_release();
2108
2109 msm_spi_free_gpios(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002110 clk_put(dd->clk);
2111 clk_put(dd->pclk);
2112 destroy_workqueue(dd->workqueue);
	platform_set_drvdata(pdev, NULL);
2114 spi_unregister_master(master);
2115 spi_master_put(master);
2116
2117 return 0;
2118}
2119
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002120static struct of_device_id msm_spi_dt_match[] = {
2121 {
2122 .compatible = "qcom,spi-qup-v2",
2123 },
2124 {}
2125};
2126
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002127static struct platform_driver msm_spi_driver = {
2128 .driver = {
2129 .name = SPI_DRV_NAME,
2130 .owner = THIS_MODULE,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002131 .of_match_table = msm_spi_dt_match,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002132 },
2133 .suspend = msm_spi_suspend,
2134 .resume = msm_spi_resume,
	.remove		= __devexit_p(msm_spi_remove),
2136};
2137
2138static int __init msm_spi_init(void)
2139{
2140 return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
2141}
2142module_init(msm_spi_init);
2143
2144static void __exit msm_spi_exit(void)
2145{
2146 platform_driver_unregister(&msm_spi_driver);
2147}
2148module_exit(msm_spi_exit);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002149
2150MODULE_LICENSE("GPL v2");
2151MODULE_VERSION("0.4");
2152MODULE_ALIAS("platform:"SPI_DRV_NAME);