Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13/*
14 * SPI driver for Qualcomm MSM platforms
15 *
16 */
17#include <linux/version.h>
18#include <linux/kernel.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070019#include <linux/module.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070020#include <linux/init.h>
21#include <linux/spinlock.h>
22#include <linux/list.h>
23#include <linux/irq.h>
24#include <linux/platform_device.h>
25#include <linux/spi/spi.h>
26#include <linux/interrupt.h>
27#include <linux/err.h>
28#include <linux/clk.h>
29#include <linux/delay.h>
30#include <linux/workqueue.h>
31#include <linux/io.h>
32#include <linux/debugfs.h>
33#include <mach/msm_spi.h>
34#include <linux/dma-mapping.h>
35#include <linux/sched.h>
36#include <mach/dma.h>
37#include <asm/atomic.h>
38#include <linux/mutex.h>
39#include <linux/gpio.h>
40#include <linux/remote_spinlock.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070041#include <linux/pm_qos.h>
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070042#include <linux/of.h>
Sathish Ambleycd06bf32012-04-09 11:59:43 -070043#include <linux/of_gpio.h>
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070044#include "spi_qsd.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070045
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070046static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
47 struct platform_device *pdev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070048{
49 struct resource *resource;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070050 unsigned long gsbi_mem_phys_addr;
51 size_t gsbi_mem_size;
52 void __iomem *gsbi_base;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070053
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070054 resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070055 if (!resource)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070056 return 0;
57
58 gsbi_mem_phys_addr = resource->start;
59 gsbi_mem_size = resource_size(resource);
60 if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
61 gsbi_mem_size, SPI_DRV_NAME))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062 return -ENXIO;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070063
64 gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
65 gsbi_mem_size);
66 if (!gsbi_base)
67 return -ENXIO;
68
69 /* Set GSBI to SPI mode */
70 writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071
72 return 0;
73}
74
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070075static inline void msm_spi_register_init(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070076{
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070077 writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
78 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
79 writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
80 writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
81 writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
82 if (dd->qup_ver)
83 writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070084}
85
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070086static inline int msm_spi_request_gpios(struct msm_spi *dd)
87{
88 int i;
89 int result = 0;
90
91 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
92 if (dd->spi_gpios[i] >= 0) {
93 result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
94 if (result) {
Harini Jayaramane4c06192011-09-28 16:26:39 -060095 dev_err(dd->dev, "%s: gpio_request for pin %d "
96 "failed with error %d\n", __func__,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070097 dd->spi_gpios[i], result);
98 goto error;
99 }
100 }
101 }
102 return 0;
103
104error:
105 for (; --i >= 0;) {
106 if (dd->spi_gpios[i] >= 0)
107 gpio_free(dd->spi_gpios[i]);
108 }
109 return result;
110}
111
112static inline void msm_spi_free_gpios(struct msm_spi *dd)
113{
114 int i;
115
116 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
117 if (dd->spi_gpios[i] >= 0)
118 gpio_free(dd->spi_gpios[i]);
119 }
Harini Jayaramane4c06192011-09-28 16:26:39 -0600120
121 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
122 if (dd->cs_gpios[i].valid) {
123 gpio_free(dd->cs_gpios[i].gpio_num);
124 dd->cs_gpios[i].valid = 0;
125 }
126 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700127}
128
129static void msm_spi_clock_set(struct msm_spi *dd, int speed)
130{
131 int rc;
132
133 rc = clk_set_rate(dd->clk, speed);
134 if (!rc)
135 dd->clock_speed = speed;
136}
137
138static int msm_spi_calculate_size(int *fifo_size,
139 int *block_size,
140 int block,
141 int mult)
142{
143 int words;
144
145 switch (block) {
146 case 0:
147 words = 1; /* 4 bytes */
148 break;
149 case 1:
150 words = 4; /* 16 bytes */
151 break;
152 case 2:
153 words = 8; /* 32 bytes */
154 break;
155 default:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700156 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700157 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700158
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700159 switch (mult) {
160 case 0:
161 *fifo_size = words * 2;
162 break;
163 case 1:
164 *fifo_size = words * 4;
165 break;
166 case 2:
167 *fifo_size = words * 8;
168 break;
169 case 3:
170 *fifo_size = words * 16;
171 break;
172 default:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700173 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700174 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700175
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700176 *block_size = words * sizeof(u32); /* in bytes */
177 return 0;
178}
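/*
 * Worked example (derived from the two switch statements above, for
 * illustration only): with block = 1 and mult = 2 read back from
 * SPI_IO_MODES, words = 4, so *fifo_size = 4 * 8 = 32 words and
 * *block_size = 4 * sizeof(u32) = 16 bytes.
 */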
179
180static void get_next_transfer(struct msm_spi *dd)
181{
182 struct spi_transfer *t = dd->cur_transfer;
183
184 if (t->transfer_list.next != &dd->cur_msg->transfers) {
185 dd->cur_transfer = list_entry(t->transfer_list.next,
186 struct spi_transfer,
187 transfer_list);
188 dd->write_buf = dd->cur_transfer->tx_buf;
189 dd->read_buf = dd->cur_transfer->rx_buf;
190 }
191}
192
193static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
194{
195 u32 spi_iom;
196 int block;
197 int mult;
198
199 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
200
201 block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
202 mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
203 if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
204 block, mult)) {
205 goto fifo_size_err;
206 }
207
208 block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
209 mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
210 if (msm_spi_calculate_size(&dd->output_fifo_size,
211 &dd->output_block_size, block, mult)) {
212 goto fifo_size_err;
213 }
214 /* DM mode is not available for this block size */
215 if (dd->input_block_size == 4 || dd->output_block_size == 4)
216 dd->use_dma = 0;
217
218 /* DM mode is currently unsupported for different block sizes */
219 if (dd->input_block_size != dd->output_block_size)
220 dd->use_dma = 0;
221
222 if (dd->use_dma)
223 dd->burst_size = max(dd->input_block_size, DM_BURST_SIZE);
224
225 return;
226
227fifo_size_err:
228 dd->use_dma = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700229 pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700230 return;
231}
232
233static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
234{
235 u32 data_in;
236 int i;
237 int shift;
238
239 data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
240 if (dd->read_buf) {
241 for (i = 0; (i < dd->bytes_per_word) &&
242 dd->rx_bytes_remaining; i++) {
243 /* The data format depends on bytes_per_word:
244 4 bytes: 0x12345678
245 3 bytes: 0x00123456
246 2 bytes: 0x00001234
247 1 byte : 0x00000012
248 */
249 shift = 8 * (dd->bytes_per_word - i - 1);
250 *dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
251 dd->rx_bytes_remaining--;
252 }
253 } else {
254 if (dd->rx_bytes_remaining >= dd->bytes_per_word)
255 dd->rx_bytes_remaining -= dd->bytes_per_word;
256 else
257 dd->rx_bytes_remaining = 0;
258 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700259
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700260 dd->read_xfr_cnt++;
261 if (dd->multi_xfr) {
262 if (!dd->rx_bytes_remaining)
263 dd->read_xfr_cnt = 0;
264 else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
265 dd->read_len) {
266 struct spi_transfer *t = dd->cur_rx_transfer;
267 if (t->transfer_list.next != &dd->cur_msg->transfers) {
268 t = list_entry(t->transfer_list.next,
269 struct spi_transfer,
270 transfer_list);
271 dd->read_buf = t->rx_buf;
272 dd->read_len = t->len;
273 dd->read_xfr_cnt = 0;
274 dd->cur_rx_transfer = t;
275 }
276 }
277 }
278}
279
280static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
281{
282 u32 spi_op = readl_relaxed(dd->base + SPI_STATE);
283
284 return spi_op & SPI_OP_STATE_VALID;
285}
286
287static inline int msm_spi_wait_valid(struct msm_spi *dd)
288{
289 unsigned long delay = 0;
290 unsigned long timeout = 0;
291
292 if (dd->clock_speed == 0)
293 return -EINVAL;
294 /*
295 * Based on the SPI clock speed, sufficient time
296 * should be given for the SPI state transition
297 * to occur
298 */
299 delay = (10 * USEC_PER_SEC) / dd->clock_speed;
300 /*
301 * For small delay values, the default timeout would
302 * be one jiffy
303 */
304 if (delay < SPI_DELAY_THRESHOLD)
305 delay = SPI_DELAY_THRESHOLD;
Harini Jayaramanbcef7732011-09-01 12:12:58 -0600306
307 /* Adding one to round off to the nearest jiffy */
308 timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700309 while (!msm_spi_is_valid_state(dd)) {
310 if (time_after(jiffies, timeout)) {
Harini Jayaramanbcef7732011-09-01 12:12:58 -0600311 if (!msm_spi_is_valid_state(dd)) {
312 if (dd->cur_msg)
313 dd->cur_msg->status = -EIO;
 314				dev_err(dd->dev, "%s: SPI operational state "
 315						"not valid\n", __func__);
316 return -ETIMEDOUT;
317 } else
318 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700319 }
320 /*
321 * For smaller values of delay, context switch time
322 * would negate the usage of usleep
323 */
324 if (delay > 20)
325 usleep(delay);
326 else if (delay)
327 udelay(delay);
328 }
329 return 0;
330}
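/*
 * Rough illustration of the timeout math above (derived from the
 * expressions in this function, not from the hardware spec): at
 * dd->clock_speed = 1 MHz the per-poll delay is
 * (10 * USEC_PER_SEC) / 1000000 = 10 usec; if that falls below
 * SPI_DELAY_THRESHOLD the threshold value is used instead, and the
 * deadline becomes jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT)
 * plus one jiffy for rounding.
 */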
331
332static inline int msm_spi_set_state(struct msm_spi *dd,
333 enum msm_spi_state state)
334{
335 enum msm_spi_state cur_state;
336 if (msm_spi_wait_valid(dd))
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700337 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700338 cur_state = readl_relaxed(dd->base + SPI_STATE);
339 /* Per spec:
340 For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
341 if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
342 (state == SPI_OP_STATE_RESET)) {
343 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
344 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
345 } else {
346 writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
347 dd->base + SPI_STATE);
348 }
349 if (msm_spi_wait_valid(dd))
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700350 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700351
352 return 0;
353}
354
355static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n)
356{
357 *config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);
358
359 if (n != (*config & SPI_CFG_N))
360 *config = (*config & ~SPI_CFG_N) | n;
361
362 if ((dd->mode == SPI_DMOV_MODE) && (!dd->read_len)) {
363 if (dd->read_buf == NULL)
364 *config |= SPI_NO_INPUT;
365 if (dd->write_buf == NULL)
366 *config |= SPI_NO_OUTPUT;
367 }
368}
369
370static void msm_spi_set_config(struct msm_spi *dd, int bpw)
371{
372 u32 spi_config;
373
374 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
375
376 if (dd->cur_msg->spi->mode & SPI_CPHA)
377 spi_config &= ~SPI_CFG_INPUT_FIRST;
378 else
379 spi_config |= SPI_CFG_INPUT_FIRST;
380 if (dd->cur_msg->spi->mode & SPI_LOOP)
381 spi_config |= SPI_CFG_LOOPBACK;
382 else
383 spi_config &= ~SPI_CFG_LOOPBACK;
384 msm_spi_add_configs(dd, &spi_config, bpw-1);
385 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
386 msm_spi_set_qup_config(dd, bpw);
387}
388
389static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
390{
391 dmov_box *box;
392 int bytes_to_send, num_rows, bytes_sent;
393 u32 num_transfers;
394
395 atomic_set(&dd->rx_irq_called, 0);
Kiran Gunda54eb06e2012-05-18 15:17:06 +0530396 atomic_set(&dd->tx_irq_called, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700397 if (dd->write_len && !dd->read_len) {
398 /* WR-WR transfer */
399 bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
400 dd->write_buf = dd->temp_buf;
401 } else {
402 bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
403 /* For WR-RD transfer, bytes_sent can be negative */
404 if (bytes_sent < 0)
405 bytes_sent = 0;
406 }
Kiran Gundae8f16742012-06-27 10:06:32 +0530407	/* Transfers larger than 4K bytes are sent in chunks of
Kiran Gunda2b285652012-07-30 13:22:39 +0530408	 * SPI_MAX_LEN on targets that have only 12 bits in the
 409	 * QUP_MAX_OUTPUT_CNT register. If the target supports more
 410	 * than 12 bits, the data is sent in chunks of the
 411	 * infinite_mode value that is defined in the
 412	 * corresponding board file.
Kiran Gundae8f16742012-06-27 10:06:32 +0530413	 */
414 if (!dd->pdata->infinite_mode)
Kiran Gunda2b285652012-07-30 13:22:39 +0530415 dd->max_trfr_len = SPI_MAX_LEN;
Kiran Gundae8f16742012-06-27 10:06:32 +0530416 else
Kiran Gunda2b285652012-07-30 13:22:39 +0530417 dd->max_trfr_len = (dd->pdata->infinite_mode) *
418 (dd->bytes_per_word);
419
420 bytes_to_send = min_t(u32, dd->tx_bytes_remaining,
421 dd->max_trfr_len);
Kiran Gundae8f16742012-06-27 10:06:32 +0530422
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700423 num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
424 dd->unaligned_len = bytes_to_send % dd->burst_size;
425 num_rows = bytes_to_send / dd->burst_size;
426
427 dd->mode = SPI_DMOV_MODE;
428
429 if (num_rows) {
430 /* src in 16 MSB, dst in 16 LSB */
431 box = &dd->tx_dmov_cmd->box;
432 box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
433 box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
434 box->num_rows = (num_rows << 16) | num_rows;
435 box->row_offset = (dd->burst_size << 16) | 0;
436
437 box = &dd->rx_dmov_cmd->box;
438 box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
439 box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
440 box->num_rows = (num_rows << 16) | num_rows;
441 box->row_offset = (0 << 16) | dd->burst_size;
442
443 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
444 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
445 offsetof(struct spi_dmov_cmd, box));
446 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
447 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
448 offsetof(struct spi_dmov_cmd, box));
449 } else {
450 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
451 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
452 offsetof(struct spi_dmov_cmd, single_pad));
453 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
454 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
455 offsetof(struct spi_dmov_cmd, single_pad));
456 }
457
458 if (!dd->unaligned_len) {
459 dd->tx_dmov_cmd->box.cmd |= CMD_LC;
460 dd->rx_dmov_cmd->box.cmd |= CMD_LC;
461 } else {
462 dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
463 dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
464 u32 offset = dd->cur_transfer->len - dd->unaligned_len;
465
466 if ((dd->multi_xfr) && (dd->read_len <= 0))
467 offset = dd->cur_msg_len - dd->unaligned_len;
468
469 dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
470 dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;
471
472 memset(dd->tx_padding, 0, dd->burst_size);
473 memset(dd->rx_padding, 0, dd->burst_size);
474 if (dd->write_buf)
475 memcpy(dd->tx_padding, dd->write_buf + offset,
476 dd->unaligned_len);
477
478 tx_cmd->src = dd->tx_padding_dma;
479 rx_cmd->dst = dd->rx_padding_dma;
480 tx_cmd->len = rx_cmd->len = dd->burst_size;
481 }
 482	/* This also takes care of the padding dummy buffer:
 483	   since this is set to the correct length, the
 484	   dummy bytes won't actually be sent */
485 if (dd->multi_xfr) {
486 u32 write_transfers = 0;
487 u32 read_transfers = 0;
488
489 if (dd->write_len > 0) {
490 write_transfers = DIV_ROUND_UP(dd->write_len,
491 dd->bytes_per_word);
492 writel_relaxed(write_transfers,
493 dd->base + SPI_MX_OUTPUT_COUNT);
494 }
495 if (dd->read_len > 0) {
496 /*
497 * The read following a write transfer must take
 498			 * into account that the bytes pertaining to
 499			 * the write transfer need to be discarded
500 * before the actual read begins.
501 */
502 read_transfers = DIV_ROUND_UP(dd->read_len +
503 dd->write_len,
504 dd->bytes_per_word);
505 writel_relaxed(read_transfers,
506 dd->base + SPI_MX_INPUT_COUNT);
507 }
508 } else {
509 if (dd->write_buf)
510 writel_relaxed(num_transfers,
511 dd->base + SPI_MX_OUTPUT_COUNT);
512 if (dd->read_buf)
513 writel_relaxed(num_transfers,
514 dd->base + SPI_MX_INPUT_COUNT);
515 }
516}
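/*
 * Descriptive summary of the DMA programming above: the burst-aligned
 * part of the buffer is covered by the box command (num_rows rows of
 * burst_size bytes each), while any unaligned tail is bounced through
 * the tx_padding/rx_padding buffers via the single_pad command. The
 * SPI_MX_*_COUNT registers are programmed with the real word count, so
 * the dummy padding bytes beyond the data are never shifted out.
 */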
517
518static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
519{
520 dma_coherent_pre_ops();
521 if (dd->write_buf)
522 msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
523 if (dd->read_buf)
524 msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
525}
526
Kiran Gunda2b285652012-07-30 13:22:39 +0530527/* The SPI core on targets that do not support infinite mode can send a
 528   maximum of 4K or 64K transfers, depending on the size of the
 529   MAX_OUTPUT_COUNT register. Therefore, we send the data in several
 530   chunks. Upon completion we send the next chunk, or complete the
 531   transfer if everything is finished. On targets that support
Kiran Gundae8f16742012-06-27 10:06:32 +0530532   infinite mode, we send all the bytes as a single chunk.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700533*/
534static int msm_spi_dm_send_next(struct msm_spi *dd)
535{
536 /* By now we should have sent all the bytes in FIFO mode,
537 * However to make things right, we'll check anyway.
538 */
539 if (dd->mode != SPI_DMOV_MODE)
540 return 0;
541
Kiran Gundae8f16742012-06-27 10:06:32 +0530542	/* On targets that do not support infinite mode,
 543	   we need to send more chunks if we sent the maximum last time */
Kiran Gunda2b285652012-07-30 13:22:39 +0530544 if (dd->tx_bytes_remaining > dd->max_trfr_len) {
545 dd->tx_bytes_remaining -= dd->max_trfr_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700546 if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
547 return 0;
548 dd->read_len = dd->write_len = 0;
549 msm_spi_setup_dm_transfer(dd);
550 msm_spi_enqueue_dm_commands(dd);
551 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
552 return 0;
553 return 1;
554 } else if (dd->read_len && dd->write_len) {
555 dd->tx_bytes_remaining -= dd->cur_transfer->len;
556 if (list_is_last(&dd->cur_transfer->transfer_list,
557 &dd->cur_msg->transfers))
558 return 0;
559 get_next_transfer(dd);
560 if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
561 return 0;
562 dd->tx_bytes_remaining = dd->read_len + dd->write_len;
563 dd->read_buf = dd->temp_buf;
564 dd->read_len = dd->write_len = -1;
565 msm_spi_setup_dm_transfer(dd);
566 msm_spi_enqueue_dm_commands(dd);
567 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
568 return 0;
569 return 1;
570 }
571 return 0;
572}
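/*
 * Note on the return value (inferred from the call site in
 * msm_spi_process_transfer()): 1 means another DMA chunk was queued and
 * the caller should keep waiting for completion; 0 means the transfer is
 * finished or could not be continued.
 */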
573
574static inline void msm_spi_ack_transfer(struct msm_spi *dd)
575{
576 writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
577 SPI_OP_MAX_OUTPUT_DONE_FLAG,
578 dd->base + SPI_OPERATIONAL);
579 /* Ensure done flag was cleared before proceeding further */
580 mb();
581}
582
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700583/* Figure out which IRQ occurred and call the relevant functions */
584static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
585{
586 u32 op, ret = IRQ_NONE;
587 struct msm_spi *dd = dev_id;
588
589 if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
590 readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
591 struct spi_master *master = dev_get_drvdata(dd->dev);
592 ret |= msm_spi_error_irq(irq, master);
593 }
594
595 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
596 if (op & SPI_OP_INPUT_SERVICE_FLAG) {
597 writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
598 dd->base + SPI_OPERATIONAL);
599 /*
600 * Ensure service flag was cleared before further
601 * processing of interrupt.
602 */
603 mb();
604 ret |= msm_spi_input_irq(irq, dev_id);
605 }
606
607 if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
608 writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
609 dd->base + SPI_OPERATIONAL);
610 /*
611 * Ensure service flag was cleared before further
612 * processing of interrupt.
613 */
614 mb();
615 ret |= msm_spi_output_irq(irq, dev_id);
616 }
617
618 if (dd->done) {
619 complete(&dd->transfer_complete);
620 dd->done = 0;
621 }
622 return ret;
623}
624
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700625static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
626{
627 struct msm_spi *dd = dev_id;
628
629 dd->stat_rx++;
630
631 if (dd->mode == SPI_MODE_NONE)
632 return IRQ_HANDLED;
633
634 if (dd->mode == SPI_DMOV_MODE) {
635 u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
636 if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
637 (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
638 msm_spi_ack_transfer(dd);
639 if (dd->unaligned_len == 0) {
640 if (atomic_inc_return(&dd->rx_irq_called) == 1)
641 return IRQ_HANDLED;
642 }
643 msm_spi_complete(dd);
644 return IRQ_HANDLED;
645 }
646 return IRQ_NONE;
647 }
648
649 if (dd->mode == SPI_FIFO_MODE) {
650 while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
651 SPI_OP_IP_FIFO_NOT_EMPTY) &&
652 (dd->rx_bytes_remaining > 0)) {
653 msm_spi_read_word_from_fifo(dd);
654 }
655 if (dd->rx_bytes_remaining == 0)
656 msm_spi_complete(dd);
657 }
658
659 return IRQ_HANDLED;
660}
661
662static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
663{
664 u32 word;
665 u8 byte;
666 int i;
667
668 word = 0;
669 if (dd->write_buf) {
670 for (i = 0; (i < dd->bytes_per_word) &&
671 dd->tx_bytes_remaining; i++) {
672 dd->tx_bytes_remaining--;
673 byte = *dd->write_buf++;
674 word |= (byte << (BITS_PER_BYTE * (3 - i)));
675 }
676 } else
677 if (dd->tx_bytes_remaining > dd->bytes_per_word)
678 dd->tx_bytes_remaining -= dd->bytes_per_word;
679 else
680 dd->tx_bytes_remaining = 0;
681 dd->write_xfr_cnt++;
682 if (dd->multi_xfr) {
683 if (!dd->tx_bytes_remaining)
684 dd->write_xfr_cnt = 0;
685 else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
686 dd->write_len) {
687 struct spi_transfer *t = dd->cur_tx_transfer;
688 if (t->transfer_list.next != &dd->cur_msg->transfers) {
689 t = list_entry(t->transfer_list.next,
690 struct spi_transfer,
691 transfer_list);
692 dd->write_buf = t->tx_buf;
693 dd->write_len = t->len;
694 dd->write_xfr_cnt = 0;
695 dd->cur_tx_transfer = t;
696 }
697 }
698 }
699 writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
700}
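/*
 * Packing illustration (mirrors the read-side comment in
 * msm_spi_read_word_from_fifo()): the first byte taken from write_buf
 * lands in the most significant byte of the FIFO word, so with
 * bytes_per_word = 2 and input bytes 0x12, 0x34 the word written is
 * 0x12340000.
 */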
701
702static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
703{
704 int count = 0;
705
706 while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
707 !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
708 SPI_OP_OUTPUT_FIFO_FULL)) {
709 msm_spi_write_word_to_fifo(dd);
710 count++;
711 }
712}
713
714static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
715{
716 struct msm_spi *dd = dev_id;
717
718 dd->stat_tx++;
719
720 if (dd->mode == SPI_MODE_NONE)
721 return IRQ_HANDLED;
722
723 if (dd->mode == SPI_DMOV_MODE) {
 724		/* A TX-only transaction is handled here.
 725		   This is the only place we signal completion on TX rather than RX */
726 if (dd->read_buf == NULL &&
727 readl_relaxed(dd->base + SPI_OPERATIONAL) &
728 SPI_OP_MAX_OUTPUT_DONE_FLAG) {
729 msm_spi_ack_transfer(dd);
Kiran Gunda54eb06e2012-05-18 15:17:06 +0530730 if (atomic_inc_return(&dd->tx_irq_called) == 1)
731 return IRQ_HANDLED;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700732 msm_spi_complete(dd);
733 return IRQ_HANDLED;
734 }
735 return IRQ_NONE;
736 }
737
738 /* Output FIFO is empty. Transmit any outstanding write data. */
739 if (dd->mode == SPI_FIFO_MODE)
740 msm_spi_write_rmn_to_fifo(dd);
741
742 return IRQ_HANDLED;
743}
744
745static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
746{
747 struct spi_master *master = dev_id;
748 struct msm_spi *dd = spi_master_get_devdata(master);
749 u32 spi_err;
750
751 spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
752 if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
753 dev_warn(master->dev.parent, "SPI output overrun error\n");
754 if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
755 dev_warn(master->dev.parent, "SPI input underrun error\n");
756 if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
757 dev_warn(master->dev.parent, "SPI output underrun error\n");
758 msm_spi_get_clk_err(dd, &spi_err);
759 if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
760 dev_warn(master->dev.parent, "SPI clock overrun error\n");
761 if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
762 dev_warn(master->dev.parent, "SPI clock underrun error\n");
763 msm_spi_clear_error_flags(dd);
764 msm_spi_ack_clk_err(dd);
765 /* Ensure clearing of QUP_ERROR_FLAGS was completed */
766 mb();
767 return IRQ_HANDLED;
768}
769
770static int msm_spi_map_dma_buffers(struct msm_spi *dd)
771{
772 struct device *dev;
773 struct spi_transfer *first_xfr;
Jordan Crouse47b3f832011-09-19 11:21:16 -0600774 struct spi_transfer *nxt_xfr = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700775 void *tx_buf, *rx_buf;
776 unsigned tx_len, rx_len;
777 int ret = -EINVAL;
778
779 dev = &dd->cur_msg->spi->dev;
780 first_xfr = dd->cur_transfer;
781 tx_buf = (void *)first_xfr->tx_buf;
782 rx_buf = first_xfr->rx_buf;
783 tx_len = rx_len = first_xfr->len;
784
785 /*
786 * For WR-WR and WR-RD transfers, we allocate our own temporary
787 * buffer and copy the data to/from the client buffers.
788 */
789 if (dd->multi_xfr) {
790 dd->temp_buf = kzalloc(dd->cur_msg_len,
791 GFP_KERNEL | __GFP_DMA);
792 if (!dd->temp_buf)
793 return -ENOMEM;
794 nxt_xfr = list_entry(first_xfr->transfer_list.next,
795 struct spi_transfer, transfer_list);
796
797 if (dd->write_len && !dd->read_len) {
798 if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
799 goto error;
800
801 memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
802 memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
803 nxt_xfr->len);
804 tx_buf = dd->temp_buf;
805 tx_len = dd->cur_msg_len;
806 } else {
807 if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
808 goto error;
809
810 rx_buf = dd->temp_buf;
811 rx_len = dd->cur_msg_len;
812 }
813 }
814 if (tx_buf != NULL) {
815 first_xfr->tx_dma = dma_map_single(dev, tx_buf,
816 tx_len, DMA_TO_DEVICE);
817 if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
818 dev_err(dev, "dma %cX %d bytes error\n",
819 'T', tx_len);
820 ret = -ENOMEM;
821 goto error;
822 }
823 }
824 if (rx_buf != NULL) {
825 dma_addr_t dma_handle;
826 dma_handle = dma_map_single(dev, rx_buf,
827 rx_len, DMA_FROM_DEVICE);
828 if (dma_mapping_error(NULL, dma_handle)) {
829 dev_err(dev, "dma %cX %d bytes error\n",
830 'R', rx_len);
831 if (tx_buf != NULL)
832 dma_unmap_single(NULL, first_xfr->tx_dma,
833 tx_len, DMA_TO_DEVICE);
834 ret = -ENOMEM;
835 goto error;
836 }
837 if (dd->multi_xfr)
838 nxt_xfr->rx_dma = dma_handle;
839 else
840 first_xfr->rx_dma = dma_handle;
841 }
842 return 0;
843
844error:
845 kfree(dd->temp_buf);
846 dd->temp_buf = NULL;
847 return ret;
848}
849
850static void msm_spi_unmap_dma_buffers(struct msm_spi *dd)
851{
852 struct device *dev;
853 u32 offset;
854
855 dev = &dd->cur_msg->spi->dev;
856 if (dd->cur_msg->is_dma_mapped)
857 goto unmap_end;
858
859 if (dd->multi_xfr) {
860 if (dd->write_len && !dd->read_len) {
861 dma_unmap_single(dev,
862 dd->cur_transfer->tx_dma,
863 dd->cur_msg_len,
864 DMA_TO_DEVICE);
865 } else {
866 struct spi_transfer *prev_xfr;
867 prev_xfr = list_entry(
868 dd->cur_transfer->transfer_list.prev,
869 struct spi_transfer,
870 transfer_list);
871 if (dd->cur_transfer->rx_buf) {
872 dma_unmap_single(dev,
873 dd->cur_transfer->rx_dma,
874 dd->cur_msg_len,
875 DMA_FROM_DEVICE);
876 }
877 if (prev_xfr->tx_buf) {
878 dma_unmap_single(dev,
879 prev_xfr->tx_dma,
880 prev_xfr->len,
881 DMA_TO_DEVICE);
882 }
883 if (dd->unaligned_len && dd->read_buf) {
884 offset = dd->cur_msg_len - dd->unaligned_len;
885 dma_coherent_post_ops();
886 memcpy(dd->read_buf + offset, dd->rx_padding,
887 dd->unaligned_len);
888 memcpy(dd->cur_transfer->rx_buf,
889 dd->read_buf + prev_xfr->len,
890 dd->cur_transfer->len);
891 }
892 }
893 kfree(dd->temp_buf);
894 dd->temp_buf = NULL;
895 return;
896 } else {
897 if (dd->cur_transfer->rx_buf)
898 dma_unmap_single(dev, dd->cur_transfer->rx_dma,
899 dd->cur_transfer->len,
900 DMA_FROM_DEVICE);
901 if (dd->cur_transfer->tx_buf)
902 dma_unmap_single(dev, dd->cur_transfer->tx_dma,
903 dd->cur_transfer->len,
904 DMA_TO_DEVICE);
905 }
906
907unmap_end:
908 /* If we padded the transfer, we copy it from the padding buf */
909 if (dd->unaligned_len && dd->read_buf) {
910 offset = dd->cur_transfer->len - dd->unaligned_len;
911 dma_coherent_post_ops();
912 memcpy(dd->read_buf + offset, dd->rx_padding,
913 dd->unaligned_len);
914 }
915}
916
917/**
918 * msm_use_dm - decides whether to use data mover for this
919 * transfer
920 * @dd: device
921 * @tr: transfer
922 *
923 * Start using DM if:
924 * 1. Transfer is longer than 3*block size.
 925 * 2. Buffers are aligned to a cache line.
926 * 3. For WR-RD or WR-WR transfers, if condition (1) and (2) above are met.
927 */
928static inline int msm_use_dm(struct msm_spi *dd, struct spi_transfer *tr,
929 u8 bpw)
930{
931 u32 cache_line = dma_get_cache_alignment();
932
933 if (!dd->use_dma)
934 return 0;
935
936 if (dd->cur_msg_len < 3*dd->input_block_size)
937 return 0;
938
939 if (dd->multi_xfr && !dd->read_len && !dd->write_len)
940 return 0;
941
942 if (tr->tx_buf) {
943 if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
944 return 0;
945 }
946 if (tr->rx_buf) {
947 if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
948 return 0;
949 }
950
951 if (tr->cs_change &&
 952	    ((bpw != 8) && (bpw != 16) && (bpw != 32)))
953 return 0;
954 return 1;
955}
956
957static void msm_spi_process_transfer(struct msm_spi *dd)
958{
959 u8 bpw;
960 u32 spi_ioc;
961 u32 spi_iom;
962 u32 spi_ioc_orig;
963 u32 max_speed;
964 u32 chip_select;
965 u32 read_count;
966 u32 timeout;
967 u32 int_loopback = 0;
968
969 dd->tx_bytes_remaining = dd->cur_msg_len;
970 dd->rx_bytes_remaining = dd->cur_msg_len;
971 dd->read_buf = dd->cur_transfer->rx_buf;
972 dd->write_buf = dd->cur_transfer->tx_buf;
973 init_completion(&dd->transfer_complete);
974 if (dd->cur_transfer->bits_per_word)
975 bpw = dd->cur_transfer->bits_per_word;
976 else
977 if (dd->cur_msg->spi->bits_per_word)
978 bpw = dd->cur_msg->spi->bits_per_word;
979 else
980 bpw = 8;
981 dd->bytes_per_word = (bpw + 7) / 8;
982
983 if (dd->cur_transfer->speed_hz)
984 max_speed = dd->cur_transfer->speed_hz;
985 else
986 max_speed = dd->cur_msg->spi->max_speed_hz;
987 if (!dd->clock_speed || max_speed != dd->clock_speed)
988 msm_spi_clock_set(dd, max_speed);
989
990 read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
991 if (dd->cur_msg->spi->mode & SPI_LOOP)
992 int_loopback = 1;
993 if (int_loopback && dd->multi_xfr &&
994 (read_count > dd->input_fifo_size)) {
995 if (dd->read_len && dd->write_len)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700996 pr_err(
 997			"%s: Internal Loopback does not support > fifo size "
998 "for write-then-read transactions\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700999 __func__);
1000 else if (dd->write_len && !dd->read_len)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001001 pr_err(
 1002			"%s: Internal Loopback does not support > fifo size "
1003 "for write-then-write transactions\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001004 __func__);
1005 return;
1006 }
1007 if (!msm_use_dm(dd, dd->cur_transfer, bpw)) {
1008 dd->mode = SPI_FIFO_MODE;
1009 if (dd->multi_xfr) {
1010 dd->read_len = dd->cur_transfer->len;
1011 dd->write_len = dd->cur_transfer->len;
1012 }
1013 /* read_count cannot exceed fifo_size, and only one READ COUNT
1014 interrupt is generated per transaction, so for transactions
1015 larger than fifo size READ COUNT must be disabled.
1016 For those transactions we usually move to Data Mover mode.
1017 */
1018 if (read_count <= dd->input_fifo_size) {
1019 writel_relaxed(read_count,
1020 dd->base + SPI_MX_READ_COUNT);
1021 msm_spi_set_write_count(dd, read_count);
1022 } else {
1023 writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
1024 msm_spi_set_write_count(dd, 0);
1025 }
1026 } else {
1027 dd->mode = SPI_DMOV_MODE;
1028 if (dd->write_len && dd->read_len) {
1029 dd->tx_bytes_remaining = dd->write_len;
1030 dd->rx_bytes_remaining = dd->read_len;
1031 }
1032 }
1033
1034 /* Write mode - fifo or data mover*/
1035 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
1036 spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
1037 spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
1038 spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
1039 /* Turn on packing for data mover */
1040 if (dd->mode == SPI_DMOV_MODE)
1041 spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
1042 else
1043 spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
1044 writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
1045
1046 msm_spi_set_config(dd, bpw);
1047
1048 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1049 spi_ioc_orig = spi_ioc;
1050 if (dd->cur_msg->spi->mode & SPI_CPOL)
1051 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1052 else
1053 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1054 chip_select = dd->cur_msg->spi->chip_select << 2;
1055 if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
1056 spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
1057 if (!dd->cur_transfer->cs_change)
1058 spi_ioc |= SPI_IO_C_MX_CS_MODE;
1059 if (spi_ioc != spi_ioc_orig)
1060 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1061
1062 if (dd->mode == SPI_DMOV_MODE) {
1063 msm_spi_setup_dm_transfer(dd);
1064 msm_spi_enqueue_dm_commands(dd);
1065 }
1066 /* The output fifo interrupt handler will handle all writes after
1067 the first. Restricting this to one write avoids contention
1068 issues and race conditions between this thread and the int handler
1069 */
1070 else if (dd->mode == SPI_FIFO_MODE) {
1071 if (msm_spi_prepare_for_write(dd))
1072 goto transfer_end;
1073 msm_spi_start_write(dd, read_count);
1074 }
1075
1076 /* Only enter the RUN state after the first word is written into
1077 the output FIFO. Otherwise, the output FIFO EMPTY interrupt
1078 might fire before the first word is written resulting in a
1079 possible race condition.
1080 */
1081 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
1082 goto transfer_end;
1083
1084 timeout = 100 * msecs_to_jiffies(
1085 DIV_ROUND_UP(dd->cur_msg_len * 8,
1086 DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
1087
1088 /* Assume success, this might change later upon transaction result */
1089 dd->cur_msg->status = 0;
1090 do {
1091 if (!wait_for_completion_timeout(&dd->transfer_complete,
1092 timeout)) {
1093 dev_err(dd->dev, "%s: SPI transaction "
1094 "timeout\n", __func__);
1095 dd->cur_msg->status = -EIO;
1096 if (dd->mode == SPI_DMOV_MODE) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001097 msm_dmov_flush(dd->tx_dma_chan, 1);
1098 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001099 }
1100 break;
1101 }
1102 } while (msm_spi_dm_send_next(dd));
1103
1104transfer_end:
1105 if (dd->mode == SPI_DMOV_MODE)
1106 msm_spi_unmap_dma_buffers(dd);
1107 dd->mode = SPI_MODE_NONE;
1108
1109 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
1110 writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
1111 dd->base + SPI_IO_CONTROL);
1112}
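/*
 * The completion timeout used above is roughly 100x the nominal time on
 * the wire: cur_msg_len * 8 bits divided by the clock rate in kHz
 * (i.e. bits per millisecond), converted to jiffies. For example, a
 * 1000-byte transfer at 1 MHz nominally takes ~8 ms, so the driver waits
 * up to ~800 ms per chunk (an approximate reading of the expression, not
 * a hardware requirement).
 */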
1113
1114static void get_transfer_length(struct msm_spi *dd)
1115{
1116 struct spi_transfer *tr;
1117 int num_xfrs = 0;
1118 int readlen = 0;
1119 int writelen = 0;
1120
1121 dd->cur_msg_len = 0;
1122 dd->multi_xfr = 0;
1123 dd->read_len = dd->write_len = 0;
1124
1125 list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
1126 if (tr->tx_buf)
1127 writelen += tr->len;
1128 if (tr->rx_buf)
1129 readlen += tr->len;
1130 dd->cur_msg_len += tr->len;
1131 num_xfrs++;
1132 }
1133
1134 if (num_xfrs == 2) {
1135 struct spi_transfer *first_xfr = dd->cur_transfer;
1136
1137 dd->multi_xfr = 1;
1138 tr = list_entry(first_xfr->transfer_list.next,
1139 struct spi_transfer,
1140 transfer_list);
1141 /*
1142 * We update dd->read_len and dd->write_len only
1143 * for WR-WR and WR-RD transfers.
1144 */
1145 if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
1146 if (((tr->tx_buf) && (!tr->rx_buf)) ||
1147 ((!tr->tx_buf) && (tr->rx_buf))) {
1148 dd->read_len = readlen;
1149 dd->write_len = writelen;
1150 }
1151 }
1152 } else if (num_xfrs > 1)
1153 dd->multi_xfr = 1;
1154}
1155
1156static inline int combine_transfers(struct msm_spi *dd)
1157{
1158 struct spi_transfer *t = dd->cur_transfer;
1159 struct spi_transfer *nxt;
1160 int xfrs_grped = 1;
1161
1162 dd->cur_msg_len = dd->cur_transfer->len;
1163 while (t->transfer_list.next != &dd->cur_msg->transfers) {
1164 nxt = list_entry(t->transfer_list.next,
1165 struct spi_transfer,
1166 transfer_list);
1167 if (t->cs_change != nxt->cs_change)
1168 return xfrs_grped;
1169 dd->cur_msg_len += nxt->len;
1170 xfrs_grped++;
1171 t = nxt;
1172 }
1173 return xfrs_grped;
1174}
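/*
 * Descriptive note: combine_transfers() groups the current transfer with
 * the consecutive transfers that share the same cs_change setting,
 * accumulating their total length in dd->cur_msg_len, and returns how
 * many transfers were grouped so the caller can skip over them.
 */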
1175
Harini Jayaraman093938a2012-04-20 15:33:23 -06001176static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
1177{
1178 u32 spi_ioc;
1179 u32 spi_ioc_orig;
1180
1181 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1182 spi_ioc_orig = spi_ioc;
1183 if (set_flag)
1184 spi_ioc |= SPI_IO_C_FORCE_CS;
1185 else
1186 spi_ioc &= ~SPI_IO_C_FORCE_CS;
1187
1188 if (spi_ioc != spi_ioc_orig)
1189 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1190}
1191
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001192static void msm_spi_process_message(struct msm_spi *dd)
1193{
1194 int xfrs_grped = 0;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001195 int cs_num;
1196 int rc;
1197
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001198 dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001199 cs_num = dd->cur_msg->spi->chip_select;
1200 if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
1201 (!(dd->cs_gpios[cs_num].valid)) &&
1202 (dd->cs_gpios[cs_num].gpio_num >= 0)) {
1203 rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
1204 spi_cs_rsrcs[cs_num]);
1205 if (rc) {
1206 dev_err(dd->dev, "gpio_request for pin %d failed with "
1207 "error %d\n", dd->cs_gpios[cs_num].gpio_num,
1208 rc);
1209 return;
1210 }
1211 dd->cs_gpios[cs_num].valid = 1;
1212 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001213
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001214 if (dd->qup_ver) {
Harini Jayaraman093938a2012-04-20 15:33:23 -06001215 write_force_cs(dd, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001216 list_for_each_entry(dd->cur_transfer,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001217 &dd->cur_msg->transfers,
1218 transfer_list) {
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001219 struct spi_transfer *t = dd->cur_transfer;
1220 struct spi_transfer *nxt;
1221
1222 if (t->transfer_list.next != &dd->cur_msg->transfers) {
1223 nxt = list_entry(t->transfer_list.next,
1224 struct spi_transfer,
1225 transfer_list);
1226
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001227 if (t->cs_change == nxt->cs_change)
Harini Jayaraman093938a2012-04-20 15:33:23 -06001228 write_force_cs(dd, 1);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001229 else
Harini Jayaraman093938a2012-04-20 15:33:23 -06001230 write_force_cs(dd, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001231 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001232
1233 dd->cur_msg_len = dd->cur_transfer->len;
1234 msm_spi_process_transfer(dd);
1235 }
1236 } else {
1237 dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
1238 struct spi_transfer,
1239 transfer_list);
1240 get_transfer_length(dd);
1241 if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
1242 /*
1243 * Handling of multi-transfers.
1244 * FIFO mode is used by default
1245 */
1246 list_for_each_entry(dd->cur_transfer,
1247 &dd->cur_msg->transfers,
1248 transfer_list) {
1249 if (!dd->cur_transfer->len)
1250 goto error;
1251 if (xfrs_grped) {
1252 xfrs_grped--;
1253 continue;
1254 } else {
1255 dd->read_len = dd->write_len = 0;
1256 xfrs_grped = combine_transfers(dd);
1257 }
1258
1259 dd->cur_tx_transfer = dd->cur_transfer;
1260 dd->cur_rx_transfer = dd->cur_transfer;
1261 msm_spi_process_transfer(dd);
1262 xfrs_grped--;
1263 }
1264 } else {
1265 /* Handling of a single transfer or
1266 * WR-WR or WR-RD transfers
1267 */
1268 if ((!dd->cur_msg->is_dma_mapped) &&
1269 (msm_use_dm(dd, dd->cur_transfer,
1270 dd->cur_transfer->bits_per_word))) {
1271 /* Mapping of DMA buffers */
1272 int ret = msm_spi_map_dma_buffers(dd);
1273 if (ret < 0) {
1274 dd->cur_msg->status = ret;
1275 goto error;
1276 }
1277 }
1278
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001279 dd->cur_tx_transfer = dd->cur_transfer;
1280 dd->cur_rx_transfer = dd->cur_transfer;
1281 msm_spi_process_transfer(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001282 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001283 }
Harini Jayaramane4c06192011-09-28 16:26:39 -06001284
1285 return;
1286
1287error:
1288 if (dd->cs_gpios[cs_num].valid) {
1289 gpio_free(dd->cs_gpios[cs_num].gpio_num);
1290 dd->cs_gpios[cs_num].valid = 0;
1291 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001292}
1293
1294/* workqueue - pull messages from queue & process */
1295static void msm_spi_workq(struct work_struct *work)
1296{
1297 struct msm_spi *dd =
1298 container_of(work, struct msm_spi, work_data);
1299 unsigned long flags;
1300 u32 status_error = 0;
Alok Chauhanb5f53792012-08-22 19:54:45 +05301301 int rc = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001302
1303 mutex_lock(&dd->core_lock);
1304
1305 /* Don't allow power collapse until we release mutex */
1306 if (pm_qos_request_active(&qos_req_list))
1307 pm_qos_update_request(&qos_req_list,
1308 dd->pm_lat);
1309 if (dd->use_rlock)
1310 remote_mutex_lock(&dd->r_lock);
1311
Alok Chauhanb5f53792012-08-22 19:54:45 +05301312 /* Configure the spi clk, miso, mosi and cs gpio */
1313 if (dd->pdata->gpio_config) {
1314 rc = dd->pdata->gpio_config();
1315 if (rc) {
1316 dev_err(dd->dev,
1317 "%s: error configuring GPIOs\n",
1318 __func__);
1319 status_error = 1;
1320 }
1321 }
1322
1323 rc = msm_spi_request_gpios(dd);
1324 if (rc)
1325 status_error = 1;
1326
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001327 clk_prepare_enable(dd->clk);
1328 clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001329 msm_spi_enable_irqs(dd);
1330
1331 if (!msm_spi_is_valid_state(dd)) {
1332 dev_err(dd->dev, "%s: SPI operational state not valid\n",
1333 __func__);
1334 status_error = 1;
1335 }
1336
1337 spin_lock_irqsave(&dd->queue_lock, flags);
1338 while (!list_empty(&dd->queue)) {
1339 dd->cur_msg = list_entry(dd->queue.next,
1340 struct spi_message, queue);
1341 list_del_init(&dd->cur_msg->queue);
1342 spin_unlock_irqrestore(&dd->queue_lock, flags);
1343 if (status_error)
1344 dd->cur_msg->status = -EIO;
1345 else
1346 msm_spi_process_message(dd);
1347 if (dd->cur_msg->complete)
1348 dd->cur_msg->complete(dd->cur_msg->context);
1349 spin_lock_irqsave(&dd->queue_lock, flags);
1350 }
1351 dd->transfer_pending = 0;
1352 spin_unlock_irqrestore(&dd->queue_lock, flags);
1353
1354 msm_spi_disable_irqs(dd);
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001355 clk_disable_unprepare(dd->clk);
1356 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001357
Alok Chauhanb5f53792012-08-22 19:54:45 +05301358 /* Free the spi clk, miso, mosi, cs gpio */
1359 if (!rc && dd->pdata && dd->pdata->gpio_release)
1360 dd->pdata->gpio_release();
1361 if (!rc)
1362 msm_spi_free_gpios(dd);
1363
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001364 if (dd->use_rlock)
1365 remote_mutex_unlock(&dd->r_lock);
1366
1367 if (pm_qos_request_active(&qos_req_list))
1368 pm_qos_update_request(&qos_req_list,
1369 PM_QOS_DEFAULT_VALUE);
1370
1371 mutex_unlock(&dd->core_lock);
1372 /* If needed, this can be done after the current message is complete,
1373 and work can be continued upon resume. No motivation for now. */
1374 if (dd->suspended)
1375 wake_up_interruptible(&dd->continue_suspend);
1376}
1377
1378static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
1379{
1380 struct msm_spi *dd;
1381 unsigned long flags;
1382 struct spi_transfer *tr;
1383
1384 dd = spi_master_get_devdata(spi->master);
1385 if (dd->suspended)
1386 return -EBUSY;
1387
1388 if (list_empty(&msg->transfers) || !msg->complete)
1389 return -EINVAL;
1390
1391 list_for_each_entry(tr, &msg->transfers, transfer_list) {
1392 /* Check message parameters */
1393 if (tr->speed_hz > dd->pdata->max_clock_speed ||
1394 (tr->bits_per_word &&
1395 (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
1396 (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
 1397			dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw, "
1398 "tx=%p, rx=%p\n",
1399 tr->speed_hz, tr->bits_per_word,
1400 tr->tx_buf, tr->rx_buf);
1401 return -EINVAL;
1402 }
1403 }
1404
1405 spin_lock_irqsave(&dd->queue_lock, flags);
1406 if (dd->suspended) {
1407 spin_unlock_irqrestore(&dd->queue_lock, flags);
1408 return -EBUSY;
1409 }
1410 dd->transfer_pending = 1;
1411 list_add_tail(&msg->queue, &dd->queue);
1412 spin_unlock_irqrestore(&dd->queue_lock, flags);
1413 queue_work(dd->workqueue, &dd->work_data);
1414 return 0;
1415}
1416
1417static int msm_spi_setup(struct spi_device *spi)
1418{
1419 struct msm_spi *dd;
1420 int rc = 0;
1421 u32 spi_ioc;
1422 u32 spi_config;
1423 u32 mask;
1424
1425 if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
1426 dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
1427 __func__, spi->bits_per_word);
1428 rc = -EINVAL;
1429 }
1430 if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
1431 dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
1432 __func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
1433 rc = -EINVAL;
1434 }
1435
1436 if (rc)
1437 goto err_setup_exit;
1438
1439 dd = spi_master_get_devdata(spi->master);
1440
1441 mutex_lock(&dd->core_lock);
1442 if (dd->suspended) {
1443 mutex_unlock(&dd->core_lock);
1444 return -EBUSY;
1445 }
1446
1447 if (dd->use_rlock)
1448 remote_mutex_lock(&dd->r_lock);
1449
Alok Chauhanb5f53792012-08-22 19:54:45 +05301450 /* Configure the spi clk, miso, mosi, cs gpio */
1451 if (dd->pdata->gpio_config) {
1452 rc = dd->pdata->gpio_config();
1453 if (rc) {
1454 dev_err(&spi->dev,
1455 "%s: error configuring GPIOs\n",
1456 __func__);
1457 rc = -ENXIO;
1458 goto err_setup_gpio;
1459 }
1460 }
1461
1462 rc = msm_spi_request_gpios(dd);
1463 if (rc) {
1464 rc = -ENXIO;
1465 goto err_setup_gpio;
1466 }
1467
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001468 clk_prepare_enable(dd->clk);
1469 clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001470
1471 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1472 mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
1473 if (spi->mode & SPI_CS_HIGH)
1474 spi_ioc |= mask;
1475 else
1476 spi_ioc &= ~mask;
1477 if (spi->mode & SPI_CPOL)
1478 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1479 else
1480 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1481
1482 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1483
1484 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
1485 if (spi->mode & SPI_LOOP)
1486 spi_config |= SPI_CFG_LOOPBACK;
1487 else
1488 spi_config &= ~SPI_CFG_LOOPBACK;
1489 if (spi->mode & SPI_CPHA)
1490 spi_config &= ~SPI_CFG_INPUT_FIRST;
1491 else
1492 spi_config |= SPI_CFG_INPUT_FIRST;
1493 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
1494
1495 /* Ensure previous write completed before disabling the clocks */
1496 mb();
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001497 clk_disable_unprepare(dd->clk);
1498 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001499
Alok Chauhanb5f53792012-08-22 19:54:45 +05301500 /* Free the spi clk, miso, mosi, cs gpio */
1501 if (dd->pdata && dd->pdata->gpio_release)
1502 dd->pdata->gpio_release();
1503 msm_spi_free_gpios(dd);
1504
1505err_setup_gpio:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001506 if (dd->use_rlock)
1507 remote_mutex_unlock(&dd->r_lock);
1508 mutex_unlock(&dd->core_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001509err_setup_exit:
1510 return rc;
1511}
1512
1513#ifdef CONFIG_DEBUG_FS
1514static int debugfs_iomem_x32_set(void *data, u64 val)
1515{
1516 writel_relaxed(val, data);
1517 /* Ensure the previous write completed. */
1518 mb();
1519 return 0;
1520}
1521
1522static int debugfs_iomem_x32_get(void *data, u64 *val)
1523{
1524 *val = readl_relaxed(data);
1525 /* Ensure the previous read completed. */
1526 mb();
1527 return 0;
1528}
1529
1530DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
1531 debugfs_iomem_x32_set, "0x%08llx\n");
1532
1533static void spi_debugfs_init(struct msm_spi *dd)
1534{
1535 dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
1536 if (dd->dent_spi) {
1537 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001538
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001539 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
1540 dd->debugfs_spi_regs[i] =
1541 debugfs_create_file(
1542 debugfs_spi_regs[i].name,
1543 debugfs_spi_regs[i].mode,
1544 dd->dent_spi,
1545 dd->base + debugfs_spi_regs[i].offset,
1546 &fops_iomem_x32);
1547 }
1548 }
1549}
1550
1551static void spi_debugfs_exit(struct msm_spi *dd)
1552{
1553 if (dd->dent_spi) {
1554 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001555
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001556 debugfs_remove_recursive(dd->dent_spi);
1557 dd->dent_spi = NULL;
1558 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
1559 dd->debugfs_spi_regs[i] = NULL;
1560 }
1561}
1562#else
1563static void spi_debugfs_init(struct msm_spi *dd) {}
1564static void spi_debugfs_exit(struct msm_spi *dd) {}
1565#endif
1566
1567/* ===Device attributes begin=== */
1568static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
1569 char *buf)
1570{
1571 struct spi_master *master = dev_get_drvdata(dev);
1572 struct msm_spi *dd = spi_master_get_devdata(master);
1573
1574 return snprintf(buf, PAGE_SIZE,
1575 "Device %s\n"
1576 "rx fifo_size = %d spi words\n"
1577 "tx fifo_size = %d spi words\n"
1578 "use_dma ? %s\n"
1579 "rx block size = %d bytes\n"
1580 "tx block size = %d bytes\n"
1581 "burst size = %d bytes\n"
1582 "DMA configuration:\n"
1583 "tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
1584 "--statistics--\n"
1585 "Rx isrs = %d\n"
1586 "Tx isrs = %d\n"
1587 "DMA error = %d\n"
1588 "--debug--\n"
1589 "NA yet\n",
1590 dev_name(dev),
1591 dd->input_fifo_size,
1592 dd->output_fifo_size,
1593 dd->use_dma ? "yes" : "no",
1594 dd->input_block_size,
1595 dd->output_block_size,
1596 dd->burst_size,
1597 dd->tx_dma_chan,
1598 dd->rx_dma_chan,
1599 dd->tx_dma_crci,
1600 dd->rx_dma_crci,
1601 dd->stat_rx + dd->stat_dmov_rx,
1602 dd->stat_tx + dd->stat_dmov_tx,
1603 dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
1604 );
1605}
1606
1607/* Reset statistics on write */
1608static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
1609 const char *buf, size_t count)
1610{
1611 struct msm_spi *dd = dev_get_drvdata(dev);
1612 dd->stat_rx = 0;
1613 dd->stat_tx = 0;
1614 dd->stat_dmov_rx = 0;
1615 dd->stat_dmov_tx = 0;
1616 dd->stat_dmov_rx_err = 0;
1617 dd->stat_dmov_tx_err = 0;
1618 return count;
1619}
1620
1621static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
1622
1623static struct attribute *dev_attrs[] = {
1624 &dev_attr_stats.attr,
1625 NULL,
1626};
1627
1628static struct attribute_group dev_attr_grp = {
1629 .attrs = dev_attrs,
1630};
1631/* ===Device attributes end=== */
1632
1633/**
1634 * spi_dmov_tx_complete_func - DataMover tx completion callback
1635 *
1636 * Executed in IRQ context (Data Mover's IRQ) DataMover's
1637 * spinlock @msm_dmov_lock held.
1638 */
1639static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
1640 unsigned int result,
1641 struct msm_dmov_errdata *err)
1642{
1643 struct msm_spi *dd;
1644
1645 if (!(result & DMOV_RSLT_VALID)) {
1646 pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
1647 return;
1648 }
1649 /* restore original context */
1650 dd = container_of(cmd, struct msm_spi, tx_hdr);
Kiran Gunda54eb06e2012-05-18 15:17:06 +05301651 if (result & DMOV_RSLT_DONE) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001652 dd->stat_dmov_tx++;
Kiran Gunda54eb06e2012-05-18 15:17:06 +05301653 if ((atomic_inc_return(&dd->tx_irq_called) == 1))
1654 return;
1655 complete(&dd->transfer_complete);
1656 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001657 /* Error or flush */
1658 if (result & DMOV_RSLT_ERROR) {
1659 dev_err(dd->dev, "DMA error (0x%08x)\n", result);
1660 dd->stat_dmov_tx_err++;
1661 }
1662 if (result & DMOV_RSLT_FLUSH) {
1663 /*
1664 * Flushing normally happens in process of
1665 * removing, when we are waiting for outstanding
1666 * DMA commands to be flushed.
1667 */
1668 dev_info(dd->dev,
1669 "DMA channel flushed (0x%08x)\n", result);
1670 }
1671 if (err)
1672 dev_err(dd->dev,
1673 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
1674 err->flush[0], err->flush[1], err->flush[2],
1675 err->flush[3], err->flush[4], err->flush[5]);
1676 dd->cur_msg->status = -EIO;
1677 complete(&dd->transfer_complete);
1678 }
1679}
1680
1681/**
1682 * spi_dmov_rx_complete_func - DataMover rx completion callback
1683 *
1684 * Executed in IRQ context (Data Mover's IRQ)
1685 * DataMover's spinlock @msm_dmov_lock held.
1686 */
1687static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
1688 unsigned int result,
1689 struct msm_dmov_errdata *err)
1690{
1691 struct msm_spi *dd;
1692
1693 if (!(result & DMOV_RSLT_VALID)) {
1694 pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)",
1695 result, cmd);
1696 return;
1697 }
1698 /* restore original context */
1699 dd = container_of(cmd, struct msm_spi, rx_hdr);
1700 if (result & DMOV_RSLT_DONE) {
1701 dd->stat_dmov_rx++;
1702 if (atomic_inc_return(&dd->rx_irq_called) == 1)
1703 return;
1704 complete(&dd->transfer_complete);
1705 } else {
1706 /** Error or flush */
1707 if (result & DMOV_RSLT_ERROR) {
1708 dev_err(dd->dev, "DMA error(0x%08x)\n", result);
1709 dd->stat_dmov_rx_err++;
1710 }
1711 if (result & DMOV_RSLT_FLUSH) {
1712 dev_info(dd->dev,
1713 "DMA channel flushed(0x%08x)\n", result);
1714 }
1715 if (err)
1716 dev_err(dd->dev,
1717 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
1718 err->flush[0], err->flush[1], err->flush[2],
1719 err->flush[3], err->flush[4], err->flush[5]);
1720 dd->cur_msg->status = -EIO;
1721 complete(&dd->transfer_complete);
1722 }
1723}
1724
1725static inline u32 get_chunk_size(struct msm_spi *dd)
1726{
1727 u32 cache_line = dma_get_cache_alignment();
1728
1729 return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
1730 roundup(dd->burst_size, cache_line))*2;
1731}
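/*
 * Layout note for the single coherent allocation sized by
 * get_chunk_size() (carved up in msm_spi_init_dma() below): it holds, in
 * order, the TX and RX spi_dmov_cmd structures (each rounded up to
 * DM_BYTE_ALIGN) followed by the TX and RX padding buffers (each rounded
 * up to a cache line), hence the factor of 2.
 */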
1732
1733static void msm_spi_teardown_dma(struct msm_spi *dd)
1734{
1735 int limit = 0;
1736
1737 if (!dd->use_dma)
1738 return;
1739
1740 while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001741 msm_dmov_flush(dd->tx_dma_chan, 1);
1742 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001743 msleep(10);
1744 }
1745
1746 dma_free_coherent(NULL, get_chunk_size(dd), dd->tx_dmov_cmd,
1747 dd->tx_dmov_cmd_dma);
1748 dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
1749 dd->tx_padding = dd->rx_padding = NULL;
1750}
1751
1752static __init int msm_spi_init_dma(struct msm_spi *dd)
1753{
1754 dmov_box *box;
1755 u32 cache_line = dma_get_cache_alignment();
1756
1757 /* Allocate all as one chunk, since all is smaller than page size */
1758
 1759	/* We pass a NULL device, since it would require coherent_dma_mask in
 1760	   the device definition; we're okay with using the system pool */
1761 dd->tx_dmov_cmd = dma_alloc_coherent(NULL, get_chunk_size(dd),
1762 &dd->tx_dmov_cmd_dma, GFP_KERNEL);
1763 if (dd->tx_dmov_cmd == NULL)
1764 return -ENOMEM;
1765
 1766	/* DMA addresses should be 64-bit aligned */
1767 dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
1768 ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
1769 dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
1770 sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);
1771
1772 /* Buffers should be aligned to cache line */
1773 dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
1774 dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
1775 sizeof(struct spi_dmov_cmd), cache_line);
1776 dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding + dd->burst_size),
1777 cache_line);
1778 dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->burst_size,
1779 cache_line);
1780
1781 /* Setup DM commands */
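	/*
	 * Each direction gets a box-mode command that moves burst-sized rows
	 * between memory and the SPI FIFO under CRCI flow control, plus a
	 * single-mode "pad" command (set up further below) that is assumed
	 * to handle the unaligned tail of a transfer.
	 */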
1782 box = &(dd->rx_dmov_cmd->box);
1783 box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
1784 box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
1785 dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
1786 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
1787 offsetof(struct spi_dmov_cmd, cmd_ptr));
1788 dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001789
1790 box = &(dd->tx_dmov_cmd->box);
1791 box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
1792 box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
1793 dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
1794 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
1795 offsetof(struct spi_dmov_cmd, cmd_ptr));
1796 dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001797
1798 dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
1799 CMD_DST_CRCI(dd->tx_dma_crci);
1800 dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
1801 SPI_OUTPUT_FIFO;
1802 dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
1803 CMD_SRC_CRCI(dd->rx_dma_crci);
1804 dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
1805 SPI_INPUT_FIFO;
1806
1807 /* Clear remaining activities on channel */
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001808 msm_dmov_flush(dd->tx_dma_chan, 1);
1809 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001810
1811 return 0;
1812}
1813
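/*
 * Illustrative device-tree node consumed by msm_spi_dt_to_pdata() and the
 * of_* look-ups in probe below; the property names are the ones read in this
 * file, while the unit address and values are made up for the example:
 *
 *	spi@f9924000 {
 *		compatible = "qcom,spi-qup-v2";
 *		cell-index = <1>;
 *		spi-max-frequency = <25000000>;
 *		infinite_mode = <0>;
 *		cs-gpios = <&msmgpio 3 0>;
 *	};
 */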
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001814struct msm_spi_platform_data *msm_spi_dt_to_pdata(struct platform_device *pdev)
1815{
1816 struct device_node *node = pdev->dev.of_node;
1817 struct msm_spi_platform_data *pdata;
1818
1819 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1820 if (!pdata) {
1821 pr_err("Unable to allocate platform data\n");
1822 return NULL;
1823 }
1824
1825 of_property_read_u32(node, "spi-max-frequency",
1826 &pdata->max_clock_speed);
Kiran Gundae8f16742012-06-27 10:06:32 +05301827 of_property_read_u32(node, "infinite_mode",
1828 &pdata->infinite_mode);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001829
1830 return pdata;
1831}
1832
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001833static int __init msm_spi_probe(struct platform_device *pdev)
1834{
1835 struct spi_master *master;
1836 struct msm_spi *dd;
1837 struct resource *resource;
1838 int rc = -ENXIO;
1839 int locked = 0;
1840 int i = 0;
1841 int clk_enabled = 0;
1842 int pclk_enabled = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001843 struct msm_spi_platform_data *pdata;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001844 enum of_gpio_flags flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001845
1846 master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
1847 if (!master) {
1848 rc = -ENOMEM;
1849 dev_err(&pdev->dev, "master allocation failed\n");
1850 goto err_probe_exit;
1851 }
1852
1853 master->bus_num = pdev->id;
1854 master->mode_bits = SPI_SUPPORTED_MODES;
1855 master->num_chipselect = SPI_NUM_CHIPSELECTS;
1856 master->setup = msm_spi_setup;
1857 master->transfer = msm_spi_transfer;
1858 platform_set_drvdata(pdev, master);
1859 dd = spi_master_get_devdata(master);
1860
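	/*
	 * Two configuration paths: probing from a device-tree node implies
	 * the newer QUP (BFAM) core with pdata and GPIOs taken from DT,
	 * while legacy board files hand in platform_data and describe the
	 * GPIOs as IORESOURCE_IO resources.
	 */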
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001861 if (pdev->dev.of_node) {
1862 dd->qup_ver = SPI_QUP_VERSION_BFAM;
1863 master->dev.of_node = pdev->dev.of_node;
1864 pdata = msm_spi_dt_to_pdata(pdev);
1865 if (!pdata) {
1866 rc = -ENOMEM;
1867 goto err_probe_exit;
1868 }
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001869
Kenneth Heitkeecc836b2012-08-11 20:53:01 -06001870 rc = of_property_read_u32(pdev->dev.of_node,
1871 "cell-index", &pdev->id);
1872 if (rc)
1873 dev_warn(&pdev->dev,
1874 "using default bus_num %d\n", pdev->id);
1875 else
1876 master->bus_num = pdev->id;
1877
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001878 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
1879 dd->spi_gpios[i] = of_get_gpio_flags(pdev->dev.of_node,
1880 i, &flags);
1881 }
1882
1883 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
1884 dd->cs_gpios[i].gpio_num = of_get_named_gpio_flags(
1885 pdev->dev.of_node, "cs-gpios",
1886 i, &flags);
1887 dd->cs_gpios[i].valid = 0;
1888 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001889 } else {
1890 pdata = pdev->dev.platform_data;
1891 dd->qup_ver = SPI_QUP_VERSION_NONE;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001892
1893 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
1894 resource = platform_get_resource(pdev, IORESOURCE_IO,
1895 i);
1896 dd->spi_gpios[i] = resource ? resource->start : -1;
1897 }
1898
1899 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
1900 resource = platform_get_resource(pdev, IORESOURCE_IO,
1901 i + ARRAY_SIZE(spi_rsrcs));
1902 dd->cs_gpios[i].gpio_num = resource ?
1903 resource->start : -1;
1904 dd->cs_gpios[i].valid = 0;
1905 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001906 }
1907
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001908 dd->pdata = pdata;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001909 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001910 if (!resource) {
1911 rc = -ENXIO;
1912 goto err_probe_res;
1913 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001914
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001915 dd->mem_phys_addr = resource->start;
1916 dd->mem_size = resource_size(resource);
1917
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001918 if (pdata) {
1919 if (pdata->dma_config) {
1920 rc = pdata->dma_config();
1921 if (rc) {
1922 dev_warn(&pdev->dev,
1923 "%s: DM mode not supported\n",
1924 __func__);
1925 dd->use_dma = 0;
1926 goto skip_dma_resources;
1927 }
1928 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001929 resource = platform_get_resource(pdev, IORESOURCE_DMA, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001930 if (resource) {
1931 dd->rx_dma_chan = resource->start;
1932 dd->tx_dma_chan = resource->end;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001933 resource = platform_get_resource(pdev, IORESOURCE_DMA,
1934 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001935 if (!resource) {
1936 rc = -ENXIO;
1937 goto err_probe_res;
1938 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001939
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001940 dd->rx_dma_crci = resource->start;
1941 dd->tx_dma_crci = resource->end;
1942 dd->use_dma = 1;
1943 master->dma_alignment = dma_get_cache_alignment();
1944 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001945 }
1946
Alok Chauhanb5f53792012-08-22 19:54:45 +05301947skip_dma_resources:
Harini Jayaramane4c06192011-09-28 16:26:39 -06001948
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001949 spin_lock_init(&dd->queue_lock);
1950 mutex_init(&dd->core_lock);
1951 INIT_LIST_HEAD(&dd->queue);
1952 INIT_WORK(&dd->work_data, msm_spi_workq);
1953 init_waitqueue_head(&dd->continue_suspend);
1954 dd->workqueue = create_singlethread_workqueue(
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001955 dev_name(master->dev.parent));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001956 if (!dd->workqueue)
1957 goto err_probe_workq;
1958
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001959 if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
1960 dd->mem_size, SPI_DRV_NAME)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001961 rc = -ENXIO;
1962 goto err_probe_reqmem;
1963 }
1964
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001965 dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
1966 if (!dd->base) {
1967 rc = -ENOMEM;
1968 goto err_probe_reqmem;
1969 }
1970
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001971 if (pdata && pdata->rsl_id) {
1972 struct remote_mutex_id rmid;
1973 rmid.r_spinlock_id = pdata->rsl_id;
1974 rmid.delay_us = SPI_TRYLOCK_DELAY;
1975
1976 rc = remote_mutex_init(&dd->r_lock, &rmid);
1977 if (rc) {
1978			dev_err(&pdev->dev, "%s: unable to init remote_mutex "
1979				"(%s), (rc=%d)\n", __func__,
1980				rmid.r_spinlock_id, rc);
1981 goto err_probe_rlock_init;
1982 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001983
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001984 dd->use_rlock = 1;
1985 dd->pm_lat = pdata->pm_lat;
Alok Chauhanb5f53792012-08-22 19:54:45 +05301986 pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
1987 PM_QOS_DEFAULT_VALUE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001988 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001989
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001990 mutex_lock(&dd->core_lock);
1991 if (dd->use_rlock)
1992 remote_mutex_lock(&dd->r_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001993
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001994 locked = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001995 dd->dev = &pdev->dev;
Matt Wagantallac294852011-08-17 15:44:58 -07001996 dd->clk = clk_get(&pdev->dev, "core_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001997 if (IS_ERR(dd->clk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07001998 dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001999 rc = PTR_ERR(dd->clk);
2000 goto err_probe_clk_get;
2001 }
2002
Matt Wagantallac294852011-08-17 15:44:58 -07002003 dd->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002004 if (IS_ERR(dd->pclk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002005 dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002006 rc = PTR_ERR(dd->pclk);
2007 goto err_probe_pclk_get;
2008 }
2009
2010 if (pdata && pdata->max_clock_speed)
2011 msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
2012
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002013 rc = clk_prepare_enable(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002014 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002015 dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002016 __func__);
2017 goto err_probe_clk_enable;
2018 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002019
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002020 clk_enabled = 1;
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002021 rc = clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002022 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002023 dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002024 __func__);
2025 goto err_probe_pclk_enable;
2026 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002027
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002028 pclk_enabled = 1;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002029 rc = msm_spi_configure_gsbi(dd, pdev);
2030 if (rc)
2031 goto err_probe_gsbi;
2032
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002033 msm_spi_calculate_fifo_size(dd);
2034 if (dd->use_dma) {
2035 rc = msm_spi_init_dma(dd);
2036 if (rc)
2037 goto err_probe_dma;
2038 }
2039
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002040 msm_spi_register_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002041 /*
2042	 * The SPI core generates a bogus input overrun error on some targets
2043	 * when a transition from run to reset state occurs and the FIFO has
2044	 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
2045 * bit.
2046 */
2047 msm_spi_enable_error_flags(dd);
2048
2049 writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
2050 rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
2051 if (rc)
2052 goto err_probe_state;
2053
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002054 clk_disable_unprepare(dd->clk);
2055 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002056 clk_enabled = 0;
2057 pclk_enabled = 0;
2058
2059 dd->suspended = 0;
2060 dd->transfer_pending = 0;
2061 dd->multi_xfr = 0;
2062 dd->mode = SPI_MODE_NONE;
2063
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002064 rc = msm_spi_request_irq(dd, pdev, master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002065 if (rc)
2066 goto err_probe_irq;
2067
2068 msm_spi_disable_irqs(dd);
2069 if (dd->use_rlock)
2070 remote_mutex_unlock(&dd->r_lock);
2071
2072 mutex_unlock(&dd->core_lock);
2073 locked = 0;
2074
2075 rc = spi_register_master(master);
2076 if (rc)
2077 goto err_probe_reg_master;
2078
2079 rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
2080 if (rc) {
2081 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
2082 goto err_attrs;
2083 }
2084
2085 spi_debugfs_init(dd);
Kiran Gunda2b285652012-07-30 13:22:39 +05302086
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002087 return 0;
2088
2089err_attrs:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002090 spi_unregister_master(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002091err_probe_reg_master:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002092err_probe_irq:
2093err_probe_state:
2094 msm_spi_teardown_dma(dd);
2095err_probe_dma:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002096err_probe_gsbi:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002097 if (pclk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002098 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002099err_probe_pclk_enable:
2100 if (clk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002101 clk_disable_unprepare(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002102err_probe_clk_enable:
2103 clk_put(dd->pclk);
2104err_probe_pclk_get:
2105 clk_put(dd->clk);
2106err_probe_clk_get:
2107 if (locked) {
2108 if (dd->use_rlock)
2109 remote_mutex_unlock(&dd->r_lock);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002110
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002111 mutex_unlock(&dd->core_lock);
2112 }
2113err_probe_rlock_init:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002114err_probe_reqmem:
2115 destroy_workqueue(dd->workqueue);
2116err_probe_workq:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002117err_probe_res:
2118 spi_master_put(master);
2119err_probe_exit:
2120 return rc;
2121}
2122
2123#ifdef CONFIG_PM
2124static int msm_spi_suspend(struct platform_device *pdev, pm_message_t state)
2125{
2126 struct spi_master *master = platform_get_drvdata(pdev);
2127 struct msm_spi *dd;
2128 unsigned long flags;
2129
2130 if (!master)
2131 goto suspend_exit;
2132 dd = spi_master_get_devdata(master);
2133 if (!dd)
2134 goto suspend_exit;
2135
2136 /* Make sure nothing is added to the queue while we're suspending */
2137 spin_lock_irqsave(&dd->queue_lock, flags);
2138 dd->suspended = 1;
2139 spin_unlock_irqrestore(&dd->queue_lock, flags);
2140
2141 /* Wait for transactions to end, or time out */
2142 wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002143
2144suspend_exit:
2145 return 0;
2146}
2147
2148static int msm_spi_resume(struct platform_device *pdev)
2149{
2150 struct spi_master *master = platform_get_drvdata(pdev);
2151 struct msm_spi *dd;
2152
2153 if (!master)
2154 goto resume_exit;
2155 dd = spi_master_get_devdata(master);
2156 if (!dd)
2157 goto resume_exit;
2158
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002159 dd->suspended = 0;
2160resume_exit:
2161 return 0;
2162}
2163#else
2164#define msm_spi_suspend NULL
2165#define msm_spi_resume NULL
2166#endif /* CONFIG_PM */
2167
2168static int __devexit msm_spi_remove(struct platform_device *pdev)
2169{
2170 struct spi_master *master = platform_get_drvdata(pdev);
2171 struct msm_spi *dd = spi_master_get_devdata(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002172
2173 pm_qos_remove_request(&qos_req_list);
2174 spi_debugfs_exit(dd);
2175 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
2176
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002177 msm_spi_teardown_dma(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002178
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002179 clk_put(dd->clk);
2180 clk_put(dd->pclk);
2181 destroy_workqueue(dd->workqueue);
2182	platform_set_drvdata(pdev, NULL);
2183 spi_unregister_master(master);
2184 spi_master_put(master);
2185
2186 return 0;
2187}
2188
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002189static struct of_device_id msm_spi_dt_match[] = {
2190 {
2191 .compatible = "qcom,spi-qup-v2",
2192 },
2193 {}
2194};
2195
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002196static struct platform_driver msm_spi_driver = {
2197 .driver = {
2198 .name = SPI_DRV_NAME,
2199 .owner = THIS_MODULE,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002200 .of_match_table = msm_spi_dt_match,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002201 },
2202 .suspend = msm_spi_suspend,
2203 .resume = msm_spi_resume,
2204	.remove = __devexit_p(msm_spi_remove),
2205};
2206
2207static int __init msm_spi_init(void)
2208{
2209 return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
2210}
2211module_init(msm_spi_init);
2212
2213static void __exit msm_spi_exit(void)
2214{
2215 platform_driver_unregister(&msm_spi_driver);
2216}
2217module_exit(msm_spi_exit);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002218
2219MODULE_LICENSE("GPL v2");
2220MODULE_VERSION("0.4");
2221MODULE_ALIAS("platform:"SPI_DRV_NAME);