Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13/*
14 * SPI driver for Qualcomm MSM platforms
15 *
16 */
17#include <linux/version.h>
18#include <linux/kernel.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070019#include <linux/module.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070020#include <linux/init.h>
21#include <linux/spinlock.h>
22#include <linux/list.h>
23#include <linux/irq.h>
24#include <linux/platform_device.h>
25#include <linux/spi/spi.h>
26#include <linux/interrupt.h>
27#include <linux/err.h>
28#include <linux/clk.h>
29#include <linux/delay.h>
30#include <linux/workqueue.h>
31#include <linux/io.h>
32#include <linux/debugfs.h>
33#include <mach/msm_spi.h>
34#include <linux/dma-mapping.h>
35#include <linux/sched.h>
36#include <mach/dma.h>
37#include <asm/atomic.h>
38#include <linux/mutex.h>
39#include <linux/gpio.h>
40#include <linux/remote_spinlock.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070041#include <linux/pm_qos.h>
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070042#include <linux/of.h>
Sathish Ambleycd06bf32012-04-09 11:59:43 -070043#include <linux/of_gpio.h>
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070044#include "spi_qsd.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070045
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070046static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
47 struct platform_device *pdev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070048{
49 struct resource *resource;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070050 unsigned long gsbi_mem_phys_addr;
51 size_t gsbi_mem_size;
52 void __iomem *gsbi_base;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070053
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070054 resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070055 if (!resource)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070056 return 0;
57
58 gsbi_mem_phys_addr = resource->start;
59 gsbi_mem_size = resource_size(resource);
60 if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
61 gsbi_mem_size, SPI_DRV_NAME))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062 return -ENXIO;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070063
64 gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
65 gsbi_mem_size);
66 if (!gsbi_base)
67 return -ENXIO;
68
69 /* Set GSBI to SPI mode */
70 writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071
72 return 0;
73}
74
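/*
 * Bring the QUP/SPI mini-core to a known state before configuration:
 * issue a software reset, force the RESET operational state, and clear
 * the operational, config and I/O-mode registers. On QUP v2 hardware
 * the operational-mask register is cleared as well.
 */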
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070075static inline void msm_spi_register_init(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070076{
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070077 writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
78 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
79 writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
80 writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
81 writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
82 if (dd->qup_ver)
83 writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070084}
85
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070086static inline int msm_spi_request_gpios(struct msm_spi *dd)
87{
88 int i;
89 int result = 0;
90
91 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
92 if (dd->spi_gpios[i] >= 0) {
93 result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
94 if (result) {
Harini Jayaramane4c06192011-09-28 16:26:39 -060095 dev_err(dd->dev, "%s: gpio_request for pin %d "
96 "failed with error %d\n", __func__,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070097 dd->spi_gpios[i], result);
98 goto error;
99 }
100 }
101 }
102 return 0;
103
104error:
 105 while (--i >= 0) {
 106 if (dd->spi_gpios[i] >= 0)
 107 gpio_free(dd->spi_gpios[i]);
 108 }
109 return result;
110}
111
112static inline void msm_spi_free_gpios(struct msm_spi *dd)
113{
114 int i;
115
116 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
117 if (dd->spi_gpios[i] >= 0)
118 gpio_free(dd->spi_gpios[i]);
119 }
Harini Jayaramane4c06192011-09-28 16:26:39 -0600120
121 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
122 if (dd->cs_gpios[i].valid) {
123 gpio_free(dd->cs_gpios[i].gpio_num);
124 dd->cs_gpios[i].valid = 0;
125 }
126 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700127}
128
129static void msm_spi_clock_set(struct msm_spi *dd, int speed)
130{
131 int rc;
132
133 rc = clk_set_rate(dd->clk, speed);
134 if (!rc)
135 dd->clock_speed = speed;
136}
137
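/*
 * The SPI_IO_MODES register encodes the FIFO geometry in two small
 * fields: 'block' selects the block size in words (1, 4 or 8) and
 * 'mult' selects how many blocks deep the FIFO is (2, 4, 8 or 16).
 * Worked example: block = 1 and mult = 2 give words = 4, so the FIFO
 * holds 4 * 8 = 32 words and the block size is 4 * 4 = 16 bytes.
 */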
138static int msm_spi_calculate_size(int *fifo_size,
139 int *block_size,
140 int block,
141 int mult)
142{
143 int words;
144
145 switch (block) {
146 case 0:
147 words = 1; /* 4 bytes */
148 break;
149 case 1:
150 words = 4; /* 16 bytes */
151 break;
152 case 2:
153 words = 8; /* 32 bytes */
154 break;
155 default:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700156 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700157 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700158
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700159 switch (mult) {
160 case 0:
161 *fifo_size = words * 2;
162 break;
163 case 1:
164 *fifo_size = words * 4;
165 break;
166 case 2:
167 *fifo_size = words * 8;
168 break;
169 case 3:
170 *fifo_size = words * 16;
171 break;
172 default:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700173 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700174 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700175
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700176 *block_size = words * sizeof(u32); /* in bytes */
177 return 0;
178}
179
180static void get_next_transfer(struct msm_spi *dd)
181{
182 struct spi_transfer *t = dd->cur_transfer;
183
184 if (t->transfer_list.next != &dd->cur_msg->transfers) {
185 dd->cur_transfer = list_entry(t->transfer_list.next,
186 struct spi_transfer,
187 transfer_list);
188 dd->write_buf = dd->cur_transfer->tx_buf;
189 dd->read_buf = dd->cur_transfer->rx_buf;
190 }
191}
192
193static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
194{
195 u32 spi_iom;
196 int block;
197 int mult;
198
199 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
200
201 block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
202 mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
203 if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
204 block, mult)) {
205 goto fifo_size_err;
206 }
207
208 block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
209 mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
210 if (msm_spi_calculate_size(&dd->output_fifo_size,
211 &dd->output_block_size, block, mult)) {
212 goto fifo_size_err;
213 }
214 /* DM mode is not available for this block size */
215 if (dd->input_block_size == 4 || dd->output_block_size == 4)
216 dd->use_dma = 0;
217
218 /* DM mode is currently unsupported for different block sizes */
219 if (dd->input_block_size != dd->output_block_size)
220 dd->use_dma = 0;
221
222 if (dd->use_dma)
223 dd->burst_size = max(dd->input_block_size, DM_BURST_SIZE);
224
225 return;
226
227fifo_size_err:
228 dd->use_dma = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700229 pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700230 return;
231}
232
233static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
234{
235 u32 data_in;
236 int i;
237 int shift;
238
239 data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
240 if (dd->read_buf) {
241 for (i = 0; (i < dd->bytes_per_word) &&
242 dd->rx_bytes_remaining; i++) {
243 /* The data format depends on bytes_per_word:
244 4 bytes: 0x12345678
245 3 bytes: 0x00123456
246 2 bytes: 0x00001234
247 1 byte : 0x00000012
248 */
249 shift = 8 * (dd->bytes_per_word - i - 1);
250 *dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
251 dd->rx_bytes_remaining--;
252 }
253 } else {
254 if (dd->rx_bytes_remaining >= dd->bytes_per_word)
255 dd->rx_bytes_remaining -= dd->bytes_per_word;
256 else
257 dd->rx_bytes_remaining = 0;
258 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700259
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700260 dd->read_xfr_cnt++;
261 if (dd->multi_xfr) {
262 if (!dd->rx_bytes_remaining)
263 dd->read_xfr_cnt = 0;
264 else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
265 dd->read_len) {
266 struct spi_transfer *t = dd->cur_rx_transfer;
267 if (t->transfer_list.next != &dd->cur_msg->transfers) {
268 t = list_entry(t->transfer_list.next,
269 struct spi_transfer,
270 transfer_list);
271 dd->read_buf = t->rx_buf;
272 dd->read_len = t->len;
273 dd->read_xfr_cnt = 0;
274 dd->cur_rx_transfer = t;
275 }
276 }
277 }
278}
279
280static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
281{
282 u32 spi_op = readl_relaxed(dd->base + SPI_STATE);
283
284 return spi_op & SPI_OP_STATE_VALID;
285}
286
Sagar Dharia2840b0a2012-11-02 18:26:01 -0600287static inline void msm_spi_udelay(unsigned long delay_usecs)
288{
289 /*
290 * For smaller values of delay, context switch time
291 * would negate the usage of usleep
292 */
293 if (delay_usecs > 20)
294 usleep_range(delay_usecs, delay_usecs);
295 else if (delay_usecs)
296 udelay(delay_usecs);
297}
298
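/*
 * Poll until the core reports a valid operational state. The poll
 * interval is roughly ten SPI clock periods (in microseconds), and the
 * overall timeout is that interval scaled by SPI_DEFAULT_TIMEOUT
 * milliseconds, so slower SPI clocks get proportionally more time.
 */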
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700299static inline int msm_spi_wait_valid(struct msm_spi *dd)
300{
301 unsigned long delay = 0;
302 unsigned long timeout = 0;
303
304 if (dd->clock_speed == 0)
305 return -EINVAL;
306 /*
307 * Based on the SPI clock speed, sufficient time
308 * should be given for the SPI state transition
309 * to occur
310 */
311 delay = (10 * USEC_PER_SEC) / dd->clock_speed;
312 /*
313 * For small delay values, the default timeout would
314 * be one jiffy
315 */
316 if (delay < SPI_DELAY_THRESHOLD)
317 delay = SPI_DELAY_THRESHOLD;
Harini Jayaramanbcef7732011-09-01 12:12:58 -0600318
319 /* Adding one to round off to the nearest jiffy */
320 timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700321 while (!msm_spi_is_valid_state(dd)) {
322 if (time_after(jiffies, timeout)) {
Harini Jayaramanbcef7732011-09-01 12:12:58 -0600323 if (!msm_spi_is_valid_state(dd)) {
324 if (dd->cur_msg)
325 dd->cur_msg->status = -EIO;
 326 dev_err(dd->dev, "%s: SPI operational state "
 327 "not valid\n", __func__);
328 return -ETIMEDOUT;
329 } else
330 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700331 }
Sagar Dharia2840b0a2012-11-02 18:26:01 -0600332 msm_spi_udelay(delay);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700333 }
334 return 0;
335}
336
337static inline int msm_spi_set_state(struct msm_spi *dd,
338 enum msm_spi_state state)
339{
340 enum msm_spi_state cur_state;
341 if (msm_spi_wait_valid(dd))
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700342 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700343 cur_state = readl_relaxed(dd->base + SPI_STATE);
344 /* Per spec:
345 For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
346 if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
347 (state == SPI_OP_STATE_RESET)) {
348 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
349 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
350 } else {
351 writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
352 dd->base + SPI_STATE);
353 }
354 if (msm_spi_wait_valid(dd))
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700355 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700356
357 return 0;
358}
359
360static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n)
361{
362 *config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);
363
364 if (n != (*config & SPI_CFG_N))
365 *config = (*config & ~SPI_CFG_N) | n;
366
367 if ((dd->mode == SPI_DMOV_MODE) && (!dd->read_len)) {
368 if (dd->read_buf == NULL)
369 *config |= SPI_NO_INPUT;
370 if (dd->write_buf == NULL)
371 *config |= SPI_NO_OUTPUT;
372 }
373}
374
375static void msm_spi_set_config(struct msm_spi *dd, int bpw)
376{
377 u32 spi_config;
378
379 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
380
381 if (dd->cur_msg->spi->mode & SPI_CPHA)
382 spi_config &= ~SPI_CFG_INPUT_FIRST;
383 else
384 spi_config |= SPI_CFG_INPUT_FIRST;
385 if (dd->cur_msg->spi->mode & SPI_LOOP)
386 spi_config |= SPI_CFG_LOOPBACK;
387 else
388 spi_config &= ~SPI_CFG_LOOPBACK;
389 msm_spi_add_configs(dd, &spi_config, bpw-1);
390 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
391 msm_spi_set_qup_config(dd, bpw);
392}
393
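/*
 * Build the Data Mover descriptors for the current chunk: a box
 * command moves the burst-aligned portion directly to/from the client
 * buffers, and a trailing single descriptor moves any unaligned
 * remainder through the driver's tx/rx padding buffers. The
 * MX_OUTPUT/MX_INPUT counts are programmed so the padded dummy bytes
 * are never actually shifted out on the wire.
 */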
394static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
395{
396 dmov_box *box;
397 int bytes_to_send, num_rows, bytes_sent;
398 u32 num_transfers;
399
400 atomic_set(&dd->rx_irq_called, 0);
Kiran Gunda54eb06e2012-05-18 15:17:06 +0530401 atomic_set(&dd->tx_irq_called, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700402 if (dd->write_len && !dd->read_len) {
403 /* WR-WR transfer */
404 bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
405 dd->write_buf = dd->temp_buf;
406 } else {
407 bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
408 /* For WR-RD transfer, bytes_sent can be negative */
409 if (bytes_sent < 0)
410 bytes_sent = 0;
411 }
Kiran Gundae8f16742012-06-27 10:06:32 +0530412 /* We'll send in chunks of SPI_MAX_LEN if larger than
Kiran Gunda2b285652012-07-30 13:22:39 +0530413 * 4K bytes for targets that have only 12 bits in the
 414 * QUP_MAX_OUTPUT_CNT register. If the target supports
 415 * more than 12 bits, then we send the data in chunks of
 416 * the infinite_mode value that is defined in the
 417 * corresponding board file.
Kiran Gundae8f16742012-06-27 10:06:32 +0530418 */
419 if (!dd->pdata->infinite_mode)
Kiran Gunda2b285652012-07-30 13:22:39 +0530420 dd->max_trfr_len = SPI_MAX_LEN;
Kiran Gundae8f16742012-06-27 10:06:32 +0530421 else
Kiran Gunda2b285652012-07-30 13:22:39 +0530422 dd->max_trfr_len = (dd->pdata->infinite_mode) *
423 (dd->bytes_per_word);
424
425 bytes_to_send = min_t(u32, dd->tx_bytes_remaining,
426 dd->max_trfr_len);
Kiran Gundae8f16742012-06-27 10:06:32 +0530427
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700428 num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
429 dd->unaligned_len = bytes_to_send % dd->burst_size;
430 num_rows = bytes_to_send / dd->burst_size;
431
432 dd->mode = SPI_DMOV_MODE;
433
434 if (num_rows) {
435 /* src in 16 MSB, dst in 16 LSB */
436 box = &dd->tx_dmov_cmd->box;
437 box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
438 box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
439 box->num_rows = (num_rows << 16) | num_rows;
440 box->row_offset = (dd->burst_size << 16) | 0;
441
442 box = &dd->rx_dmov_cmd->box;
443 box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
444 box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
445 box->num_rows = (num_rows << 16) | num_rows;
446 box->row_offset = (0 << 16) | dd->burst_size;
447
448 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
449 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
450 offsetof(struct spi_dmov_cmd, box));
451 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
452 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
453 offsetof(struct spi_dmov_cmd, box));
454 } else {
455 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
456 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
457 offsetof(struct spi_dmov_cmd, single_pad));
458 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
459 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
460 offsetof(struct spi_dmov_cmd, single_pad));
461 }
462
463 if (!dd->unaligned_len) {
464 dd->tx_dmov_cmd->box.cmd |= CMD_LC;
465 dd->rx_dmov_cmd->box.cmd |= CMD_LC;
466 } else {
467 dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
468 dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
469 u32 offset = dd->cur_transfer->len - dd->unaligned_len;
470
471 if ((dd->multi_xfr) && (dd->read_len <= 0))
472 offset = dd->cur_msg_len - dd->unaligned_len;
473
474 dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
475 dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;
476
477 memset(dd->tx_padding, 0, dd->burst_size);
478 memset(dd->rx_padding, 0, dd->burst_size);
479 if (dd->write_buf)
480 memcpy(dd->tx_padding, dd->write_buf + offset,
481 dd->unaligned_len);
482
483 tx_cmd->src = dd->tx_padding_dma;
484 rx_cmd->dst = dd->rx_padding_dma;
485 tx_cmd->len = rx_cmd->len = dd->burst_size;
486 }
 487 /* This also takes care of the dummy padding buffer.
 488 Since this is set to the correct length, the
 489 dummy bytes won't actually be sent. */
490 if (dd->multi_xfr) {
491 u32 write_transfers = 0;
492 u32 read_transfers = 0;
493
494 if (dd->write_len > 0) {
495 write_transfers = DIV_ROUND_UP(dd->write_len,
496 dd->bytes_per_word);
497 writel_relaxed(write_transfers,
498 dd->base + SPI_MX_OUTPUT_COUNT);
499 }
500 if (dd->read_len > 0) {
501 /*
502 * The read following a write transfer must take
503 * into account, that the bytes pertaining to
504 * the write transfer needs to be discarded,
505 * before the actual read begins.
506 */
507 read_transfers = DIV_ROUND_UP(dd->read_len +
508 dd->write_len,
509 dd->bytes_per_word);
510 writel_relaxed(read_transfers,
511 dd->base + SPI_MX_INPUT_COUNT);
512 }
513 } else {
514 if (dd->write_buf)
515 writel_relaxed(num_transfers,
516 dd->base + SPI_MX_OUTPUT_COUNT);
517 if (dd->read_buf)
518 writel_relaxed(num_transfers,
519 dd->base + SPI_MX_INPUT_COUNT);
520 }
521}
522
523static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
524{
525 dma_coherent_pre_ops();
526 if (dd->write_buf)
527 msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
528 if (dd->read_buf)
529 msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
530}
531
Kiran Gunda2b285652012-07-30 13:22:39 +0530532/* The SPI core on targets that do not support infinite mode can send
 533 a maximum of 4K or 64K transfers, depending on the size of the
 534 MAX_OUTPUT_COUNT register. Therefore, we send the data in several
 535 chunks. Upon completion we send the next chunk, or complete the
 536 transfer if everything is finished. On targets that support
Kiran Gundae8f16742012-06-27 10:06:32 +0530537 infinite mode, we send all the bytes as a single chunk.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700538*/
539static int msm_spi_dm_send_next(struct msm_spi *dd)
540{
541 /* By now we should have sent all the bytes in FIFO mode,
542 * However to make things right, we'll check anyway.
543 */
544 if (dd->mode != SPI_DMOV_MODE)
545 return 0;
546
Kiran Gundae8f16742012-06-27 10:06:32 +0530547 /* On targets that do not support infinite mode,
 548 we need to send more chunks if we sent the maximum last time */
Kiran Gunda2b285652012-07-30 13:22:39 +0530549 if (dd->tx_bytes_remaining > dd->max_trfr_len) {
550 dd->tx_bytes_remaining -= dd->max_trfr_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700551 if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
552 return 0;
553 dd->read_len = dd->write_len = 0;
554 msm_spi_setup_dm_transfer(dd);
555 msm_spi_enqueue_dm_commands(dd);
556 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
557 return 0;
558 return 1;
559 } else if (dd->read_len && dd->write_len) {
560 dd->tx_bytes_remaining -= dd->cur_transfer->len;
561 if (list_is_last(&dd->cur_transfer->transfer_list,
562 &dd->cur_msg->transfers))
563 return 0;
564 get_next_transfer(dd);
565 if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
566 return 0;
567 dd->tx_bytes_remaining = dd->read_len + dd->write_len;
568 dd->read_buf = dd->temp_buf;
569 dd->read_len = dd->write_len = -1;
570 msm_spi_setup_dm_transfer(dd);
571 msm_spi_enqueue_dm_commands(dd);
572 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
573 return 0;
574 return 1;
575 }
576 return 0;
577}
578
579static inline void msm_spi_ack_transfer(struct msm_spi *dd)
580{
581 writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
582 SPI_OP_MAX_OUTPUT_DONE_FLAG,
583 dd->base + SPI_OPERATIONAL);
584 /* Ensure done flag was cleared before proceeding further */
585 mb();
586}
587
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700588/* Figure out which IRQ occurred and call the relevant functions */
589static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
590{
591 u32 op, ret = IRQ_NONE;
592 struct msm_spi *dd = dev_id;
593
594 if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
595 readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
596 struct spi_master *master = dev_get_drvdata(dd->dev);
597 ret |= msm_spi_error_irq(irq, master);
598 }
599
600 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
601 if (op & SPI_OP_INPUT_SERVICE_FLAG) {
602 writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
603 dd->base + SPI_OPERATIONAL);
604 /*
605 * Ensure service flag was cleared before further
606 * processing of interrupt.
607 */
608 mb();
609 ret |= msm_spi_input_irq(irq, dev_id);
610 }
611
612 if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
613 writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
614 dd->base + SPI_OPERATIONAL);
615 /*
616 * Ensure service flag was cleared before further
617 * processing of interrupt.
618 */
619 mb();
620 ret |= msm_spi_output_irq(irq, dev_id);
621 }
622
623 if (dd->done) {
624 complete(&dd->transfer_complete);
625 dd->done = 0;
626 }
627 return ret;
628}
629
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700630static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
631{
632 struct msm_spi *dd = dev_id;
633
634 dd->stat_rx++;
635
636 if (dd->mode == SPI_MODE_NONE)
637 return IRQ_HANDLED;
638
639 if (dd->mode == SPI_DMOV_MODE) {
640 u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
641 if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
642 (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
643 msm_spi_ack_transfer(dd);
644 if (dd->unaligned_len == 0) {
645 if (atomic_inc_return(&dd->rx_irq_called) == 1)
646 return IRQ_HANDLED;
647 }
648 msm_spi_complete(dd);
649 return IRQ_HANDLED;
650 }
651 return IRQ_NONE;
652 }
653
654 if (dd->mode == SPI_FIFO_MODE) {
655 while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
656 SPI_OP_IP_FIFO_NOT_EMPTY) &&
657 (dd->rx_bytes_remaining > 0)) {
658 msm_spi_read_word_from_fifo(dd);
659 }
660 if (dd->rx_bytes_remaining == 0)
661 msm_spi_complete(dd);
662 }
663
664 return IRQ_HANDLED;
665}
666
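/*
 * Pack up to bytes_per_word bytes from the write buffer into a single
 * 32-bit word, first byte into the most-significant byte, and push it
 * into the output FIFO.
 */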
667static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
668{
669 u32 word;
670 u8 byte;
671 int i;
672
673 word = 0;
674 if (dd->write_buf) {
675 for (i = 0; (i < dd->bytes_per_word) &&
676 dd->tx_bytes_remaining; i++) {
677 dd->tx_bytes_remaining--;
678 byte = *dd->write_buf++;
679 word |= (byte << (BITS_PER_BYTE * (3 - i)));
680 }
681 } else
682 if (dd->tx_bytes_remaining > dd->bytes_per_word)
683 dd->tx_bytes_remaining -= dd->bytes_per_word;
684 else
685 dd->tx_bytes_remaining = 0;
686 dd->write_xfr_cnt++;
687 if (dd->multi_xfr) {
688 if (!dd->tx_bytes_remaining)
689 dd->write_xfr_cnt = 0;
690 else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
691 dd->write_len) {
692 struct spi_transfer *t = dd->cur_tx_transfer;
693 if (t->transfer_list.next != &dd->cur_msg->transfers) {
694 t = list_entry(t->transfer_list.next,
695 struct spi_transfer,
696 transfer_list);
697 dd->write_buf = t->tx_buf;
698 dd->write_len = t->len;
699 dd->write_xfr_cnt = 0;
700 dd->cur_tx_transfer = t;
701 }
702 }
703 }
704 writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
705}
706
707static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
708{
709 int count = 0;
710
711 while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
712 !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
713 SPI_OP_OUTPUT_FIFO_FULL)) {
714 msm_spi_write_word_to_fifo(dd);
715 count++;
716 }
717}
718
719static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
720{
721 struct msm_spi *dd = dev_id;
722
723 dd->stat_tx++;
724
725 if (dd->mode == SPI_MODE_NONE)
726 return IRQ_HANDLED;
727
728 if (dd->mode == SPI_DMOV_MODE) {
 729 /* A TX_ONLY transaction is handled here.
 730 This is the only place we signal completion on TX rather than RX. */
731 if (dd->read_buf == NULL &&
732 readl_relaxed(dd->base + SPI_OPERATIONAL) &
733 SPI_OP_MAX_OUTPUT_DONE_FLAG) {
734 msm_spi_ack_transfer(dd);
Kiran Gunda54eb06e2012-05-18 15:17:06 +0530735 if (atomic_inc_return(&dd->tx_irq_called) == 1)
736 return IRQ_HANDLED;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700737 msm_spi_complete(dd);
738 return IRQ_HANDLED;
739 }
740 return IRQ_NONE;
741 }
742
743 /* Output FIFO is empty. Transmit any outstanding write data. */
744 if (dd->mode == SPI_FIFO_MODE)
745 msm_spi_write_rmn_to_fifo(dd);
746
747 return IRQ_HANDLED;
748}
749
750static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
751{
752 struct spi_master *master = dev_id;
753 struct msm_spi *dd = spi_master_get_devdata(master);
754 u32 spi_err;
755
756 spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
757 if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
758 dev_warn(master->dev.parent, "SPI output overrun error\n");
759 if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
760 dev_warn(master->dev.parent, "SPI input underrun error\n");
761 if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
762 dev_warn(master->dev.parent, "SPI output underrun error\n");
763 msm_spi_get_clk_err(dd, &spi_err);
764 if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
765 dev_warn(master->dev.parent, "SPI clock overrun error\n");
766 if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
767 dev_warn(master->dev.parent, "SPI clock underrun error\n");
768 msm_spi_clear_error_flags(dd);
769 msm_spi_ack_clk_err(dd);
770 /* Ensure clearing of QUP_ERROR_FLAGS was completed */
771 mb();
772 return IRQ_HANDLED;
773}
774
775static int msm_spi_map_dma_buffers(struct msm_spi *dd)
776{
777 struct device *dev;
778 struct spi_transfer *first_xfr;
Jordan Crouse47b3f832011-09-19 11:21:16 -0600779 struct spi_transfer *nxt_xfr = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700780 void *tx_buf, *rx_buf;
781 unsigned tx_len, rx_len;
782 int ret = -EINVAL;
783
784 dev = &dd->cur_msg->spi->dev;
785 first_xfr = dd->cur_transfer;
786 tx_buf = (void *)first_xfr->tx_buf;
787 rx_buf = first_xfr->rx_buf;
788 tx_len = rx_len = first_xfr->len;
789
790 /*
791 * For WR-WR and WR-RD transfers, we allocate our own temporary
792 * buffer and copy the data to/from the client buffers.
793 */
794 if (dd->multi_xfr) {
795 dd->temp_buf = kzalloc(dd->cur_msg_len,
796 GFP_KERNEL | __GFP_DMA);
797 if (!dd->temp_buf)
798 return -ENOMEM;
799 nxt_xfr = list_entry(first_xfr->transfer_list.next,
800 struct spi_transfer, transfer_list);
801
802 if (dd->write_len && !dd->read_len) {
803 if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
804 goto error;
805
806 memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
807 memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
808 nxt_xfr->len);
809 tx_buf = dd->temp_buf;
810 tx_len = dd->cur_msg_len;
811 } else {
812 if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
813 goto error;
814
815 rx_buf = dd->temp_buf;
816 rx_len = dd->cur_msg_len;
817 }
818 }
819 if (tx_buf != NULL) {
820 first_xfr->tx_dma = dma_map_single(dev, tx_buf,
821 tx_len, DMA_TO_DEVICE);
822 if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
823 dev_err(dev, "dma %cX %d bytes error\n",
824 'T', tx_len);
825 ret = -ENOMEM;
826 goto error;
827 }
828 }
829 if (rx_buf != NULL) {
830 dma_addr_t dma_handle;
831 dma_handle = dma_map_single(dev, rx_buf,
832 rx_len, DMA_FROM_DEVICE);
833 if (dma_mapping_error(NULL, dma_handle)) {
834 dev_err(dev, "dma %cX %d bytes error\n",
835 'R', rx_len);
836 if (tx_buf != NULL)
837 dma_unmap_single(NULL, first_xfr->tx_dma,
838 tx_len, DMA_TO_DEVICE);
839 ret = -ENOMEM;
840 goto error;
841 }
842 if (dd->multi_xfr)
843 nxt_xfr->rx_dma = dma_handle;
844 else
845 first_xfr->rx_dma = dma_handle;
846 }
847 return 0;
848
849error:
850 kfree(dd->temp_buf);
851 dd->temp_buf = NULL;
852 return ret;
853}
854
855static void msm_spi_unmap_dma_buffers(struct msm_spi *dd)
856{
857 struct device *dev;
858 u32 offset;
859
860 dev = &dd->cur_msg->spi->dev;
861 if (dd->cur_msg->is_dma_mapped)
862 goto unmap_end;
863
864 if (dd->multi_xfr) {
865 if (dd->write_len && !dd->read_len) {
866 dma_unmap_single(dev,
867 dd->cur_transfer->tx_dma,
868 dd->cur_msg_len,
869 DMA_TO_DEVICE);
870 } else {
871 struct spi_transfer *prev_xfr;
872 prev_xfr = list_entry(
873 dd->cur_transfer->transfer_list.prev,
874 struct spi_transfer,
875 transfer_list);
876 if (dd->cur_transfer->rx_buf) {
877 dma_unmap_single(dev,
878 dd->cur_transfer->rx_dma,
879 dd->cur_msg_len,
880 DMA_FROM_DEVICE);
881 }
882 if (prev_xfr->tx_buf) {
883 dma_unmap_single(dev,
884 prev_xfr->tx_dma,
885 prev_xfr->len,
886 DMA_TO_DEVICE);
887 }
888 if (dd->unaligned_len && dd->read_buf) {
889 offset = dd->cur_msg_len - dd->unaligned_len;
890 dma_coherent_post_ops();
891 memcpy(dd->read_buf + offset, dd->rx_padding,
892 dd->unaligned_len);
893 memcpy(dd->cur_transfer->rx_buf,
894 dd->read_buf + prev_xfr->len,
895 dd->cur_transfer->len);
896 }
897 }
898 kfree(dd->temp_buf);
899 dd->temp_buf = NULL;
900 return;
901 } else {
902 if (dd->cur_transfer->rx_buf)
903 dma_unmap_single(dev, dd->cur_transfer->rx_dma,
904 dd->cur_transfer->len,
905 DMA_FROM_DEVICE);
906 if (dd->cur_transfer->tx_buf)
907 dma_unmap_single(dev, dd->cur_transfer->tx_dma,
908 dd->cur_transfer->len,
909 DMA_TO_DEVICE);
910 }
911
912unmap_end:
913 /* If we padded the transfer, we copy it from the padding buf */
914 if (dd->unaligned_len && dd->read_buf) {
915 offset = dd->cur_transfer->len - dd->unaligned_len;
916 dma_coherent_post_ops();
917 memcpy(dd->read_buf + offset, dd->rx_padding,
918 dd->unaligned_len);
919 }
920}
921
922/**
923 * msm_use_dm - decides whether to use data mover for this
924 * transfer
925 * @dd: device
926 * @tr: transfer
927 *
928 * Start using DM if:
929 * 1. Transfer is longer than 3*block size.
930 * 2. Buffers should be aligned to cache line.
931 * 3. For WR-RD or WR-WR transfers, if condition (1) and (2) above are met.
932 */
933static inline int msm_use_dm(struct msm_spi *dd, struct spi_transfer *tr,
934 u8 bpw)
935{
936 u32 cache_line = dma_get_cache_alignment();
937
938 if (!dd->use_dma)
939 return 0;
940
941 if (dd->cur_msg_len < 3*dd->input_block_size)
942 return 0;
943
944 if (dd->multi_xfr && !dd->read_len && !dd->write_len)
945 return 0;
946
947 if (tr->tx_buf) {
948 if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
949 return 0;
950 }
951 if (tr->rx_buf) {
952 if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
953 return 0;
954 }
955
 956 if (tr->cs_change &&
 957 ((bpw != 8) && (bpw != 16) && (bpw != 32)))
958 return 0;
959 return 1;
960}
961
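/*
 * Run one transfer (or one combined group of transfers): pick FIFO or
 * Data Mover mode, program the word size, clock and chip-select
 * controls, kick off the first FIFO write or the DM commands, and then
 * wait for completion with a timeout scaled to the transfer length and
 * clock speed.
 */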
962static void msm_spi_process_transfer(struct msm_spi *dd)
963{
964 u8 bpw;
965 u32 spi_ioc;
966 u32 spi_iom;
967 u32 spi_ioc_orig;
968 u32 max_speed;
969 u32 chip_select;
970 u32 read_count;
971 u32 timeout;
972 u32 int_loopback = 0;
973
974 dd->tx_bytes_remaining = dd->cur_msg_len;
975 dd->rx_bytes_remaining = dd->cur_msg_len;
976 dd->read_buf = dd->cur_transfer->rx_buf;
977 dd->write_buf = dd->cur_transfer->tx_buf;
978 init_completion(&dd->transfer_complete);
979 if (dd->cur_transfer->bits_per_word)
980 bpw = dd->cur_transfer->bits_per_word;
981 else
982 if (dd->cur_msg->spi->bits_per_word)
983 bpw = dd->cur_msg->spi->bits_per_word;
984 else
985 bpw = 8;
986 dd->bytes_per_word = (bpw + 7) / 8;
987
988 if (dd->cur_transfer->speed_hz)
989 max_speed = dd->cur_transfer->speed_hz;
990 else
991 max_speed = dd->cur_msg->spi->max_speed_hz;
992 if (!dd->clock_speed || max_speed != dd->clock_speed)
993 msm_spi_clock_set(dd, max_speed);
994
995 read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
996 if (dd->cur_msg->spi->mode & SPI_LOOP)
997 int_loopback = 1;
998 if (int_loopback && dd->multi_xfr &&
999 (read_count > dd->input_fifo_size)) {
1000 if (dd->read_len && dd->write_len)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001001 pr_err(
1002 "%s:Internal Loopback does not support > fifo size"
1003 "for write-then-read transactions\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001004 __func__);
1005 else if (dd->write_len && !dd->read_len)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001006 pr_err(
1007 "%s:Internal Loopback does not support > fifo size"
1008 "for write-then-write transactions\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001009 __func__);
1010 return;
1011 }
1012 if (!msm_use_dm(dd, dd->cur_transfer, bpw)) {
1013 dd->mode = SPI_FIFO_MODE;
1014 if (dd->multi_xfr) {
1015 dd->read_len = dd->cur_transfer->len;
1016 dd->write_len = dd->cur_transfer->len;
1017 }
1018 /* read_count cannot exceed fifo_size, and only one READ COUNT
1019 interrupt is generated per transaction, so for transactions
1020 larger than fifo size READ COUNT must be disabled.
1021 For those transactions we usually move to Data Mover mode.
1022 */
1023 if (read_count <= dd->input_fifo_size) {
1024 writel_relaxed(read_count,
1025 dd->base + SPI_MX_READ_COUNT);
1026 msm_spi_set_write_count(dd, read_count);
1027 } else {
1028 writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
1029 msm_spi_set_write_count(dd, 0);
1030 }
1031 } else {
1032 dd->mode = SPI_DMOV_MODE;
1033 if (dd->write_len && dd->read_len) {
1034 dd->tx_bytes_remaining = dd->write_len;
1035 dd->rx_bytes_remaining = dd->read_len;
1036 }
1037 }
1038
1039 /* Write mode - fifo or data mover*/
1040 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
1041 spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
1042 spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
1043 spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
1044 /* Turn on packing for data mover */
1045 if (dd->mode == SPI_DMOV_MODE)
1046 spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
1047 else
1048 spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
1049 writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
1050
1051 msm_spi_set_config(dd, bpw);
1052
1053 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1054 spi_ioc_orig = spi_ioc;
1055 if (dd->cur_msg->spi->mode & SPI_CPOL)
1056 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1057 else
1058 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1059 chip_select = dd->cur_msg->spi->chip_select << 2;
1060 if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
1061 spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
1062 if (!dd->cur_transfer->cs_change)
1063 spi_ioc |= SPI_IO_C_MX_CS_MODE;
1064 if (spi_ioc != spi_ioc_orig)
1065 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1066
1067 if (dd->mode == SPI_DMOV_MODE) {
1068 msm_spi_setup_dm_transfer(dd);
1069 msm_spi_enqueue_dm_commands(dd);
1070 }
1071 /* The output fifo interrupt handler will handle all writes after
1072 the first. Restricting this to one write avoids contention
1073 issues and race conditions between this thread and the int handler
1074 */
1075 else if (dd->mode == SPI_FIFO_MODE) {
1076 if (msm_spi_prepare_for_write(dd))
1077 goto transfer_end;
1078 msm_spi_start_write(dd, read_count);
1079 }
1080
1081 /* Only enter the RUN state after the first word is written into
1082 the output FIFO. Otherwise, the output FIFO EMPTY interrupt
1083 might fire before the first word is written resulting in a
1084 possible race condition.
1085 */
1086 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
1087 goto transfer_end;
1088
1089 timeout = 100 * msecs_to_jiffies(
1090 DIV_ROUND_UP(dd->cur_msg_len * 8,
1091 DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
1092
1093 /* Assume success, this might change later upon transaction result */
1094 dd->cur_msg->status = 0;
1095 do {
1096 if (!wait_for_completion_timeout(&dd->transfer_complete,
1097 timeout)) {
1098 dev_err(dd->dev, "%s: SPI transaction "
1099 "timeout\n", __func__);
1100 dd->cur_msg->status = -EIO;
1101 if (dd->mode == SPI_DMOV_MODE) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001102 msm_dmov_flush(dd->tx_dma_chan, 1);
1103 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001104 }
1105 break;
1106 }
1107 } while (msm_spi_dm_send_next(dd));
1108
Sagar Dharia2840b0a2012-11-02 18:26:01 -06001109 msm_spi_udelay(dd->cur_transfer->delay_usecs);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001110transfer_end:
1111 if (dd->mode == SPI_DMOV_MODE)
1112 msm_spi_unmap_dma_buffers(dd);
1113 dd->mode = SPI_MODE_NONE;
1114
1115 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
1116 writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
1117 dd->base + SPI_IO_CONTROL);
1118}
1119
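/*
 * Walk the message once to compute the total length and count the
 * transfers. For exactly two transfers starting with a write, record
 * read_len/write_len so the composite WR-WR and WR-RD paths can be
 * used; otherwise just flag the message as multi-transfer.
 */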
1120static void get_transfer_length(struct msm_spi *dd)
1121{
1122 struct spi_transfer *tr;
1123 int num_xfrs = 0;
1124 int readlen = 0;
1125 int writelen = 0;
1126
1127 dd->cur_msg_len = 0;
1128 dd->multi_xfr = 0;
1129 dd->read_len = dd->write_len = 0;
1130
1131 list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
1132 if (tr->tx_buf)
1133 writelen += tr->len;
1134 if (tr->rx_buf)
1135 readlen += tr->len;
1136 dd->cur_msg_len += tr->len;
1137 num_xfrs++;
1138 }
1139
1140 if (num_xfrs == 2) {
1141 struct spi_transfer *first_xfr = dd->cur_transfer;
1142
1143 dd->multi_xfr = 1;
1144 tr = list_entry(first_xfr->transfer_list.next,
1145 struct spi_transfer,
1146 transfer_list);
1147 /*
1148 * We update dd->read_len and dd->write_len only
1149 * for WR-WR and WR-RD transfers.
1150 */
1151 if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
1152 if (((tr->tx_buf) && (!tr->rx_buf)) ||
1153 ((!tr->tx_buf) && (tr->rx_buf))) {
1154 dd->read_len = readlen;
1155 dd->write_len = writelen;
1156 }
1157 }
1158 } else if (num_xfrs > 1)
1159 dd->multi_xfr = 1;
1160}
1161
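/*
 * Count how many consecutive transfers, starting at cur_transfer, share
 * the same cs_change setting; those are processed back-to-back as one
 * unit and dd->cur_msg_len is grown to cover them.
 */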
1162static inline int combine_transfers(struct msm_spi *dd)
1163{
1164 struct spi_transfer *t = dd->cur_transfer;
1165 struct spi_transfer *nxt;
1166 int xfrs_grped = 1;
1167
1168 dd->cur_msg_len = dd->cur_transfer->len;
1169 while (t->transfer_list.next != &dd->cur_msg->transfers) {
1170 nxt = list_entry(t->transfer_list.next,
1171 struct spi_transfer,
1172 transfer_list);
1173 if (t->cs_change != nxt->cs_change)
1174 return xfrs_grped;
1175 dd->cur_msg_len += nxt->len;
1176 xfrs_grped++;
1177 t = nxt;
1178 }
1179 return xfrs_grped;
1180}
1181
Harini Jayaraman093938a2012-04-20 15:33:23 -06001182static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
1183{
1184 u32 spi_ioc;
1185 u32 spi_ioc_orig;
1186
1187 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1188 spi_ioc_orig = spi_ioc;
1189 if (set_flag)
1190 spi_ioc |= SPI_IO_C_FORCE_CS;
1191 else
1192 spi_ioc &= ~SPI_IO_C_FORCE_CS;
1193
1194 if (spi_ioc != spi_ioc_orig)
1195 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1196}
1197
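/*
 * Top-level per-message handler: claim the chip-select GPIO if needed,
 * then either process transfers one by one (QUP v2, or when a
 * per-transfer delay is requested) or group/combine them and, where the
 * Data Mover criteria are met, map the DMA buffers first.
 */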
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001198static void msm_spi_process_message(struct msm_spi *dd)
1199{
1200 int xfrs_grped = 0;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001201 int cs_num;
1202 int rc;
Sagar Dharia2840b0a2012-11-02 18:26:01 -06001203 bool xfer_delay = false;
1204 struct spi_transfer *tr;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001205
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001206 dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001207 cs_num = dd->cur_msg->spi->chip_select;
1208 if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
1209 (!(dd->cs_gpios[cs_num].valid)) &&
1210 (dd->cs_gpios[cs_num].gpio_num >= 0)) {
1211 rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
1212 spi_cs_rsrcs[cs_num]);
1213 if (rc) {
1214 dev_err(dd->dev, "gpio_request for pin %d failed with "
1215 "error %d\n", dd->cs_gpios[cs_num].gpio_num,
1216 rc);
1217 return;
1218 }
1219 dd->cs_gpios[cs_num].valid = 1;
1220 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001221
Sagar Dharia2840b0a2012-11-02 18:26:01 -06001222 list_for_each_entry(tr,
1223 &dd->cur_msg->transfers,
1224 transfer_list) {
1225 if (tr->delay_usecs) {
 1226 dev_info(dd->dev, "SPI slave requests delay per txn: %d usecs\n",
1227 tr->delay_usecs);
1228 xfer_delay = true;
1229 break;
1230 }
1231 }
1232
1233 /* Don't combine xfers if delay is needed after every xfer */
1234 if (dd->qup_ver || xfer_delay) {
1235 if (dd->qup_ver)
1236 write_force_cs(dd, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001237 list_for_each_entry(dd->cur_transfer,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001238 &dd->cur_msg->transfers,
1239 transfer_list) {
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001240 struct spi_transfer *t = dd->cur_transfer;
1241 struct spi_transfer *nxt;
1242
1243 if (t->transfer_list.next != &dd->cur_msg->transfers) {
1244 nxt = list_entry(t->transfer_list.next,
1245 struct spi_transfer,
1246 transfer_list);
1247
Sagar Dharia2840b0a2012-11-02 18:26:01 -06001248 if (dd->qup_ver &&
1249 t->cs_change == nxt->cs_change)
Harini Jayaraman093938a2012-04-20 15:33:23 -06001250 write_force_cs(dd, 1);
Sagar Dharia2840b0a2012-11-02 18:26:01 -06001251 else if (dd->qup_ver)
Harini Jayaraman093938a2012-04-20 15:33:23 -06001252 write_force_cs(dd, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001253 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001254
1255 dd->cur_msg_len = dd->cur_transfer->len;
1256 msm_spi_process_transfer(dd);
1257 }
1258 } else {
1259 dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
1260 struct spi_transfer,
1261 transfer_list);
1262 get_transfer_length(dd);
1263 if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
1264 /*
1265 * Handling of multi-transfers.
1266 * FIFO mode is used by default
1267 */
1268 list_for_each_entry(dd->cur_transfer,
1269 &dd->cur_msg->transfers,
1270 transfer_list) {
1271 if (!dd->cur_transfer->len)
1272 goto error;
1273 if (xfrs_grped) {
1274 xfrs_grped--;
1275 continue;
1276 } else {
1277 dd->read_len = dd->write_len = 0;
1278 xfrs_grped = combine_transfers(dd);
1279 }
1280
1281 dd->cur_tx_transfer = dd->cur_transfer;
1282 dd->cur_rx_transfer = dd->cur_transfer;
1283 msm_spi_process_transfer(dd);
1284 xfrs_grped--;
1285 }
1286 } else {
1287 /* Handling of a single transfer or
1288 * WR-WR or WR-RD transfers
1289 */
1290 if ((!dd->cur_msg->is_dma_mapped) &&
1291 (msm_use_dm(dd, dd->cur_transfer,
1292 dd->cur_transfer->bits_per_word))) {
1293 /* Mapping of DMA buffers */
1294 int ret = msm_spi_map_dma_buffers(dd);
1295 if (ret < 0) {
1296 dd->cur_msg->status = ret;
1297 goto error;
1298 }
1299 }
1300
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001301 dd->cur_tx_transfer = dd->cur_transfer;
1302 dd->cur_rx_transfer = dd->cur_transfer;
1303 msm_spi_process_transfer(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001304 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001305 }
Harini Jayaramane4c06192011-09-28 16:26:39 -06001306
1307 return;
1308
1309error:
1310 if (dd->cs_gpios[cs_num].valid) {
1311 gpio_free(dd->cs_gpios[cs_num].gpio_num);
1312 dd->cs_gpios[cs_num].valid = 0;
1313 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001314}
1315
1316/* workqueue - pull messages from queue & process */
1317static void msm_spi_workq(struct work_struct *work)
1318{
1319 struct msm_spi *dd =
1320 container_of(work, struct msm_spi, work_data);
1321 unsigned long flags;
1322 u32 status_error = 0;
Alok Chauhanb5f53792012-08-22 19:54:45 +05301323 int rc = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001324
1325 mutex_lock(&dd->core_lock);
1326
1327 /* Don't allow power collapse until we release mutex */
1328 if (pm_qos_request_active(&qos_req_list))
1329 pm_qos_update_request(&qos_req_list,
1330 dd->pm_lat);
1331 if (dd->use_rlock)
1332 remote_mutex_lock(&dd->r_lock);
1333
Alok Chauhanb5f53792012-08-22 19:54:45 +05301334 /* Configure the spi clk, miso, mosi and cs gpio */
1335 if (dd->pdata->gpio_config) {
1336 rc = dd->pdata->gpio_config();
1337 if (rc) {
1338 dev_err(dd->dev,
1339 "%s: error configuring GPIOs\n",
1340 __func__);
1341 status_error = 1;
1342 }
1343 }
1344
1345 rc = msm_spi_request_gpios(dd);
1346 if (rc)
1347 status_error = 1;
1348
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001349 clk_prepare_enable(dd->clk);
1350 clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001351 msm_spi_enable_irqs(dd);
1352
1353 if (!msm_spi_is_valid_state(dd)) {
1354 dev_err(dd->dev, "%s: SPI operational state not valid\n",
1355 __func__);
1356 status_error = 1;
1357 }
1358
1359 spin_lock_irqsave(&dd->queue_lock, flags);
1360 while (!list_empty(&dd->queue)) {
1361 dd->cur_msg = list_entry(dd->queue.next,
1362 struct spi_message, queue);
1363 list_del_init(&dd->cur_msg->queue);
1364 spin_unlock_irqrestore(&dd->queue_lock, flags);
1365 if (status_error)
1366 dd->cur_msg->status = -EIO;
1367 else
1368 msm_spi_process_message(dd);
1369 if (dd->cur_msg->complete)
1370 dd->cur_msg->complete(dd->cur_msg->context);
1371 spin_lock_irqsave(&dd->queue_lock, flags);
1372 }
1373 dd->transfer_pending = 0;
1374 spin_unlock_irqrestore(&dd->queue_lock, flags);
1375
1376 msm_spi_disable_irqs(dd);
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001377 clk_disable_unprepare(dd->clk);
1378 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001379
Alok Chauhanb5f53792012-08-22 19:54:45 +05301380 /* Free the spi clk, miso, mosi, cs gpio */
1381 if (!rc && dd->pdata && dd->pdata->gpio_release)
1382 dd->pdata->gpio_release();
1383 if (!rc)
1384 msm_spi_free_gpios(dd);
1385
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001386 if (dd->use_rlock)
1387 remote_mutex_unlock(&dd->r_lock);
1388
1389 if (pm_qos_request_active(&qos_req_list))
1390 pm_qos_update_request(&qos_req_list,
1391 PM_QOS_DEFAULT_VALUE);
1392
1393 mutex_unlock(&dd->core_lock);
1394 /* If needed, this can be done after the current message is complete,
1395 and work can be continued upon resume. No motivation for now. */
1396 if (dd->suspended)
1397 wake_up_interruptible(&dd->continue_suspend);
1398}
1399
1400static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
1401{
1402 struct msm_spi *dd;
1403 unsigned long flags;
1404 struct spi_transfer *tr;
1405
1406 dd = spi_master_get_devdata(spi->master);
1407 if (dd->suspended)
1408 return -EBUSY;
1409
1410 if (list_empty(&msg->transfers) || !msg->complete)
1411 return -EINVAL;
1412
1413 list_for_each_entry(tr, &msg->transfers, transfer_list) {
1414 /* Check message parameters */
1415 if (tr->speed_hz > dd->pdata->max_clock_speed ||
1416 (tr->bits_per_word &&
1417 (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
1418 (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
1419 dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw"
1420 "tx=%p, rx=%p\n",
1421 tr->speed_hz, tr->bits_per_word,
1422 tr->tx_buf, tr->rx_buf);
1423 return -EINVAL;
1424 }
1425 }
1426
1427 spin_lock_irqsave(&dd->queue_lock, flags);
1428 if (dd->suspended) {
1429 spin_unlock_irqrestore(&dd->queue_lock, flags);
1430 return -EBUSY;
1431 }
1432 dd->transfer_pending = 1;
1433 list_add_tail(&msg->queue, &dd->queue);
1434 spin_unlock_irqrestore(&dd->queue_lock, flags);
1435 queue_work(dd->workqueue, &dd->work_data);
1436 return 0;
1437}
1438
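/*
 * Illustrative only (not part of this driver): a client queues work
 * through the standard spi core, which ends up in msm_spi_transfer()
 * above. The function and buffer names below are hypothetical.
 *
 *	static int example_read_id(struct spi_device *spi)
 *	{
 *		u8 tx[2] = { 0x9f, 0x00 };
 *		u8 rx[2];
 *		struct spi_transfer t = {
 *			.tx_buf = tx,
 *			.rx_buf = rx,
 *			.len = sizeof(tx),
 *		};
 *		struct spi_message m;
 *
 *		spi_message_init(&m);
 *		spi_message_add_tail(&t, &m);
 *		return spi_sync(spi, &m);	// eventually calls msm_spi_transfer()
 *	}
 */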
1439static int msm_spi_setup(struct spi_device *spi)
1440{
1441 struct msm_spi *dd;
1442 int rc = 0;
1443 u32 spi_ioc;
1444 u32 spi_config;
1445 u32 mask;
1446
1447 if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
1448 dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
1449 __func__, spi->bits_per_word);
1450 rc = -EINVAL;
1451 }
1452 if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
1453 dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
1454 __func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
1455 rc = -EINVAL;
1456 }
1457
1458 if (rc)
1459 goto err_setup_exit;
1460
1461 dd = spi_master_get_devdata(spi->master);
1462
1463 mutex_lock(&dd->core_lock);
1464 if (dd->suspended) {
1465 mutex_unlock(&dd->core_lock);
1466 return -EBUSY;
1467 }
1468
1469 if (dd->use_rlock)
1470 remote_mutex_lock(&dd->r_lock);
1471
Alok Chauhanb5f53792012-08-22 19:54:45 +05301472 /* Configure the spi clk, miso, mosi, cs gpio */
1473 if (dd->pdata->gpio_config) {
1474 rc = dd->pdata->gpio_config();
1475 if (rc) {
1476 dev_err(&spi->dev,
1477 "%s: error configuring GPIOs\n",
1478 __func__);
1479 rc = -ENXIO;
1480 goto err_setup_gpio;
1481 }
1482 }
1483
1484 rc = msm_spi_request_gpios(dd);
1485 if (rc) {
1486 rc = -ENXIO;
1487 goto err_setup_gpio;
1488 }
1489
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001490 clk_prepare_enable(dd->clk);
1491 clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001492
1493 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1494 mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
1495 if (spi->mode & SPI_CS_HIGH)
1496 spi_ioc |= mask;
1497 else
1498 spi_ioc &= ~mask;
1499 if (spi->mode & SPI_CPOL)
1500 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1501 else
1502 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1503
1504 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1505
1506 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
1507 if (spi->mode & SPI_LOOP)
1508 spi_config |= SPI_CFG_LOOPBACK;
1509 else
1510 spi_config &= ~SPI_CFG_LOOPBACK;
1511 if (spi->mode & SPI_CPHA)
1512 spi_config &= ~SPI_CFG_INPUT_FIRST;
1513 else
1514 spi_config |= SPI_CFG_INPUT_FIRST;
1515 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
1516
1517 /* Ensure previous write completed before disabling the clocks */
1518 mb();
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001519 clk_disable_unprepare(dd->clk);
1520 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001521
Alok Chauhanb5f53792012-08-22 19:54:45 +05301522 /* Free the spi clk, miso, mosi, cs gpio */
1523 if (dd->pdata && dd->pdata->gpio_release)
1524 dd->pdata->gpio_release();
1525 msm_spi_free_gpios(dd);
1526
1527err_setup_gpio:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001528 if (dd->use_rlock)
1529 remote_mutex_unlock(&dd->r_lock);
1530 mutex_unlock(&dd->core_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001531err_setup_exit:
1532 return rc;
1533}
1534
1535#ifdef CONFIG_DEBUG_FS
1536static int debugfs_iomem_x32_set(void *data, u64 val)
1537{
1538 writel_relaxed(val, data);
1539 /* Ensure the previous write completed. */
1540 mb();
1541 return 0;
1542}
1543
1544static int debugfs_iomem_x32_get(void *data, u64 *val)
1545{
1546 *val = readl_relaxed(data);
1547 /* Ensure the previous read completed. */
1548 mb();
1549 return 0;
1550}
1551
1552DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
1553 debugfs_iomem_x32_set, "0x%08llx\n");
1554
1555static void spi_debugfs_init(struct msm_spi *dd)
1556{
1557 dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
1558 if (dd->dent_spi) {
1559 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001560
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001561 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
1562 dd->debugfs_spi_regs[i] =
1563 debugfs_create_file(
1564 debugfs_spi_regs[i].name,
1565 debugfs_spi_regs[i].mode,
1566 dd->dent_spi,
1567 dd->base + debugfs_spi_regs[i].offset,
1568 &fops_iomem_x32);
1569 }
1570 }
1571}
1572
1573static void spi_debugfs_exit(struct msm_spi *dd)
1574{
1575 if (dd->dent_spi) {
1576 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001577
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001578 debugfs_remove_recursive(dd->dent_spi);
1579 dd->dent_spi = NULL;
1580 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
1581 dd->debugfs_spi_regs[i] = NULL;
1582 }
1583}
1584#else
1585static void spi_debugfs_init(struct msm_spi *dd) {}
1586static void spi_debugfs_exit(struct msm_spi *dd) {}
1587#endif
1588
1589/* ===Device attributes begin=== */
1590static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
1591 char *buf)
1592{
1593 struct spi_master *master = dev_get_drvdata(dev);
1594 struct msm_spi *dd = spi_master_get_devdata(master);
1595
1596 return snprintf(buf, PAGE_SIZE,
1597 "Device %s\n"
1598 "rx fifo_size = %d spi words\n"
1599 "tx fifo_size = %d spi words\n"
1600 "use_dma ? %s\n"
1601 "rx block size = %d bytes\n"
1602 "tx block size = %d bytes\n"
1603 "burst size = %d bytes\n"
1604 "DMA configuration:\n"
1605 "tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
1606 "--statistics--\n"
1607 "Rx isrs = %d\n"
1608 "Tx isrs = %d\n"
1609 "DMA error = %d\n"
1610 "--debug--\n"
1611 "NA yet\n",
1612 dev_name(dev),
1613 dd->input_fifo_size,
1614 dd->output_fifo_size,
1615 dd->use_dma ? "yes" : "no",
1616 dd->input_block_size,
1617 dd->output_block_size,
1618 dd->burst_size,
1619 dd->tx_dma_chan,
1620 dd->rx_dma_chan,
1621 dd->tx_dma_crci,
1622 dd->rx_dma_crci,
1623 dd->stat_rx + dd->stat_dmov_rx,
1624 dd->stat_tx + dd->stat_dmov_tx,
1625 dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
1626 );
1627}
1628
1629/* Reset statistics on write */
1630static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
1631 const char *buf, size_t count)
1632{
1633 struct msm_spi *dd = dev_get_drvdata(dev);
1634 dd->stat_rx = 0;
1635 dd->stat_tx = 0;
1636 dd->stat_dmov_rx = 0;
1637 dd->stat_dmov_tx = 0;
1638 dd->stat_dmov_rx_err = 0;
1639 dd->stat_dmov_tx_err = 0;
1640 return count;
1641}
1642
1643static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
1644
1645static struct attribute *dev_attrs[] = {
1646 &dev_attr_stats.attr,
1647 NULL,
1648};
1649
1650static struct attribute_group dev_attr_grp = {
1651 .attrs = dev_attrs,
1652};
1653/* ===Device attributes end=== */
1654
1655/**
1656 * spi_dmov_tx_complete_func - DataMover tx completion callback
1657 *
1658 * Executed in IRQ context (Data Mover's IRQ) DataMover's
1659 * spinlock @msm_dmov_lock held.
1660 */
1661static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
1662 unsigned int result,
1663 struct msm_dmov_errdata *err)
1664{
1665 struct msm_spi *dd;
1666
1667 if (!(result & DMOV_RSLT_VALID)) {
1668 pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
1669 return;
1670 }
1671 /* restore original context */
1672 dd = container_of(cmd, struct msm_spi, tx_hdr);
Kiran Gunda54eb06e2012-05-18 15:17:06 +05301673 if (result & DMOV_RSLT_DONE) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001674 dd->stat_dmov_tx++;
Kiran Gunda54eb06e2012-05-18 15:17:06 +05301675 if ((atomic_inc_return(&dd->tx_irq_called) == 1))
1676 return;
1677 complete(&dd->transfer_complete);
1678 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001679 /* Error or flush */
1680 if (result & DMOV_RSLT_ERROR) {
1681 dev_err(dd->dev, "DMA error (0x%08x)\n", result);
1682 dd->stat_dmov_tx_err++;
1683 }
1684 if (result & DMOV_RSLT_FLUSH) {
1685 /*
1686 * Flushing normally happens in process of
1687 * removing, when we are waiting for outstanding
1688 * DMA commands to be flushed.
1689 */
1690 dev_info(dd->dev,
1691 "DMA channel flushed (0x%08x)\n", result);
1692 }
1693 if (err)
1694 dev_err(dd->dev,
1695 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
1696 err->flush[0], err->flush[1], err->flush[2],
1697 err->flush[3], err->flush[4], err->flush[5]);
1698 dd->cur_msg->status = -EIO;
1699 complete(&dd->transfer_complete);
1700 }
1701}
1702
1703/**
1704 * spi_dmov_rx_complete_func - DataMover rx completion callback
1705 *
1706 * Executed in IRQ context (Data Mover's IRQ)
1707 * DataMover's spinlock @msm_dmov_lock held.
1708 */
1709static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
1710 unsigned int result,
1711 struct msm_dmov_errdata *err)
1712{
1713 struct msm_spi *dd;
1714
1715 if (!(result & DMOV_RSLT_VALID)) {
1716 pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)",
1717 result, cmd);
1718 return;
1719 }
1720 /* restore original context */
1721 dd = container_of(cmd, struct msm_spi, rx_hdr);
1722 if (result & DMOV_RSLT_DONE) {
1723 dd->stat_dmov_rx++;
1724 if (atomic_inc_return(&dd->rx_irq_called) == 1)
1725 return;
1726 complete(&dd->transfer_complete);
1727 } else {
1728 /** Error or flush */
1729 if (result & DMOV_RSLT_ERROR) {
1730 dev_err(dd->dev, "DMA error(0x%08x)\n", result);
1731 dd->stat_dmov_rx_err++;
1732 }
1733 if (result & DMOV_RSLT_FLUSH) {
1734 dev_info(dd->dev,
1735 "DMA channel flushed(0x%08x)\n", result);
1736 }
1737 if (err)
1738 dev_err(dd->dev,
1739 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
1740 err->flush[0], err->flush[1], err->flush[2],
1741 err->flush[3], err->flush[4], err->flush[5]);
1742 dd->cur_msg->status = -EIO;
1743 complete(&dd->transfer_complete);
1744 }
1745}
1746
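/*
 * One coherent allocation holds both DM command structures and both
 * padding buffers: two spi_dmov_cmd blocks rounded up to DM_BYTE_ALIGN
 * plus two burst_size padding areas rounded up to the cache line size.
 */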
1747static inline u32 get_chunk_size(struct msm_spi *dd)
1748{
1749 u32 cache_line = dma_get_cache_alignment();
1750
1751 return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
1752 roundup(dd->burst_size, cache_line))*2;
1753}
1754
1755static void msm_spi_teardown_dma(struct msm_spi *dd)
1756{
1757 int limit = 0;
1758
1759 if (!dd->use_dma)
1760 return;
1761
1762 while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001763 msm_dmov_flush(dd->tx_dma_chan, 1);
1764 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001765 msleep(10);
1766 }
1767
1768 dma_free_coherent(NULL, get_chunk_size(dd), dd->tx_dmov_cmd,
1769 dd->tx_dmov_cmd_dma);
1770 dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
1771 dd->tx_padding = dd->rx_padding = NULL;
1772}
1773
1774static __init int msm_spi_init_dma(struct msm_spi *dd)
1775{
1776 dmov_box *box;
1777 u32 cache_line = dma_get_cache_alignment();
1778
1779 /* Allocate all as one chunk, since all is smaller than page size */
1780
1781	/* We pass a NULL device: dma_alloc_coherent() would otherwise require a
1782	   coherent_dma_mask in the device definition; the system pool is fine here */
1783 dd->tx_dmov_cmd = dma_alloc_coherent(NULL, get_chunk_size(dd),
1784 &dd->tx_dmov_cmd_dma, GFP_KERNEL);
1785 if (dd->tx_dmov_cmd == NULL)
1786 return -ENOMEM;
1787
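	/*
	 * Carve the single coherent chunk up as:
	 *   tx_dmov_cmd | rx_dmov_cmd  (DM_BYTE_ALIGN aligned)
	 *   tx_padding  | rx_padding   (cache-line aligned)
	 */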
1788	/* DMA addresses should be 64-bit aligned */
1789 dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
1790 ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
1791 dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
1792 sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);
1793
1794 /* Buffers should be aligned to cache line */
1795 dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
1796 dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
1797 sizeof(struct spi_dmov_cmd), cache_line);
1798 dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding + dd->burst_size),
1799 cache_line);
1800 dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->burst_size,
1801 cache_line);
1802
1803 /* Setup DM commands */
1804 box = &(dd->rx_dmov_cmd->box);
1805 box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
1806 box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
1807 dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
1808 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
1809 offsetof(struct spi_dmov_cmd, cmd_ptr));
1810 dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001811
1812 box = &(dd->tx_dmov_cmd->box);
1813 box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
1814 box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
1815 dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
1816 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
1817 offsetof(struct spi_dmov_cmd, cmd_ptr));
1818 dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001819
1820 dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
1821 CMD_DST_CRCI(dd->tx_dma_crci);
1822 dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
1823 SPI_OUTPUT_FIFO;
1824 dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
1825 CMD_SRC_CRCI(dd->rx_dma_crci);
1826 dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
1827 SPI_INPUT_FIFO;
1828
1829 /* Clear remaining activities on channel */
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001830 msm_dmov_flush(dd->tx_dma_chan, 1);
1831 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001832
1833 return 0;
1834}
1835
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001836struct msm_spi_platform_data *msm_spi_dt_to_pdata(struct platform_device *pdev)
1837{
1838 struct device_node *node = pdev->dev.of_node;
1839 struct msm_spi_platform_data *pdata;
1840
1841 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1842 if (!pdata) {
1843 pr_err("Unable to allocate platform data\n");
1844 return NULL;
1845 }
1846
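	/*
	 * Illustrative device-tree node for this parser; the unit address and
	 * values are placeholders, only the property names come from the
	 * reads below and from the driver's match table:
	 *
	 *	spi@<addr> {
	 *		compatible = "qcom,spi-qup-v2";
	 *		spi-max-frequency = <25000000>;
	 *		infinite_mode = <0>;
	 *	};
	 */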
1847 of_property_read_u32(node, "spi-max-frequency",
1848 &pdata->max_clock_speed);
Kiran Gundae8f16742012-06-27 10:06:32 +05301849 of_property_read_u32(node, "infinite_mode",
1850 &pdata->infinite_mode);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001851
1852 return pdata;
1853}
1854
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001855static int __init msm_spi_probe(struct platform_device *pdev)
1856{
1857 struct spi_master *master;
1858 struct msm_spi *dd;
1859 struct resource *resource;
1860 int rc = -ENXIO;
1861 int locked = 0;
1862 int i = 0;
1863 int clk_enabled = 0;
1864 int pclk_enabled = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001865 struct msm_spi_platform_data *pdata;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001866 enum of_gpio_flags flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001867
1868 master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
1869 if (!master) {
1870 rc = -ENOMEM;
1871 dev_err(&pdev->dev, "master allocation failed\n");
1872 goto err_probe_exit;
1873 }
1874
1875 master->bus_num = pdev->id;
1876 master->mode_bits = SPI_SUPPORTED_MODES;
1877 master->num_chipselect = SPI_NUM_CHIPSELECTS;
1878 master->setup = msm_spi_setup;
1879 master->transfer = msm_spi_transfer;
1880 platform_set_drvdata(pdev, master);
1881 dd = spi_master_get_devdata(master);
1882
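	/*
	 * DT-based targets (QUP v2) take pdata and GPIO numbers from the
	 * device node; legacy targets fall back to board-file platform_data
	 * and IORESOURCE_IO entries.
	 */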
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001883 if (pdev->dev.of_node) {
1884 dd->qup_ver = SPI_QUP_VERSION_BFAM;
1885 master->dev.of_node = pdev->dev.of_node;
1886 pdata = msm_spi_dt_to_pdata(pdev);
1887 if (!pdata) {
1888 rc = -ENOMEM;
1889 goto err_probe_exit;
1890 }
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001891
Kenneth Heitkeecc836b2012-08-11 20:53:01 -06001892 rc = of_property_read_u32(pdev->dev.of_node,
1893 "cell-index", &pdev->id);
1894 if (rc)
1895 dev_warn(&pdev->dev,
1896 "using default bus_num %d\n", pdev->id);
1897 else
1898 master->bus_num = pdev->id;
1899
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001900 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
1901 dd->spi_gpios[i] = of_get_gpio_flags(pdev->dev.of_node,
1902 i, &flags);
1903 }
1904
1905 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
1906 dd->cs_gpios[i].gpio_num = of_get_named_gpio_flags(
1907 pdev->dev.of_node, "cs-gpios",
1908 i, &flags);
1909 dd->cs_gpios[i].valid = 0;
1910 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001911 } else {
1912 pdata = pdev->dev.platform_data;
1913 dd->qup_ver = SPI_QUP_VERSION_NONE;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001914
1915 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
1916 resource = platform_get_resource(pdev, IORESOURCE_IO,
1917 i);
1918 dd->spi_gpios[i] = resource ? resource->start : -1;
1919 }
1920
1921 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
1922 resource = platform_get_resource(pdev, IORESOURCE_IO,
1923 i + ARRAY_SIZE(spi_rsrcs));
1924 dd->cs_gpios[i].gpio_num = resource ?
1925 resource->start : -1;
1926 dd->cs_gpios[i].valid = 0;
1927 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001928 }
1929
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001930 dd->pdata = pdata;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001931 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001932 if (!resource) {
1933 rc = -ENXIO;
1934 goto err_probe_res;
1935 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001936
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001937 dd->mem_phys_addr = resource->start;
1938 dd->mem_size = resource_size(resource);
1939
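	/*
	 * DMA is optional: the first IORESOURCE_DMA entry supplies the RX/TX
	 * data-mover channels, the second the CRCIs; use_dma is enabled only
	 * when both are present.
	 */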
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001940 if (pdata) {
1941 if (pdata->dma_config) {
1942 rc = pdata->dma_config();
1943 if (rc) {
1944 dev_warn(&pdev->dev,
1945 "%s: DM mode not supported\n",
1946 __func__);
1947 dd->use_dma = 0;
1948 goto skip_dma_resources;
1949 }
1950 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001951 resource = platform_get_resource(pdev, IORESOURCE_DMA, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001952 if (resource) {
1953 dd->rx_dma_chan = resource->start;
1954 dd->tx_dma_chan = resource->end;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001955 resource = platform_get_resource(pdev, IORESOURCE_DMA,
1956 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001957 if (!resource) {
1958 rc = -ENXIO;
1959 goto err_probe_res;
1960 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001961
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001962 dd->rx_dma_crci = resource->start;
1963 dd->tx_dma_crci = resource->end;
1964 dd->use_dma = 1;
1965 master->dma_alignment = dma_get_cache_alignment();
1966 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001967 }
1968
Alok Chauhanb5f53792012-08-22 19:54:45 +05301969skip_dma_resources:
Harini Jayaramane4c06192011-09-28 16:26:39 -06001970
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001971 spin_lock_init(&dd->queue_lock);
1972 mutex_init(&dd->core_lock);
1973 INIT_LIST_HEAD(&dd->queue);
1974 INIT_WORK(&dd->work_data, msm_spi_workq);
1975 init_waitqueue_head(&dd->continue_suspend);
1976 dd->workqueue = create_singlethread_workqueue(
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001977 dev_name(master->dev.parent));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001978 if (!dd->workqueue)
1979 goto err_probe_workq;
1980
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001981 if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
1982 dd->mem_size, SPI_DRV_NAME)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001983 rc = -ENXIO;
1984 goto err_probe_reqmem;
1985 }
1986
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001987 dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
1988 if (!dd->base) {
1989 rc = -ENOMEM;
1990 goto err_probe_reqmem;
1991 }
1992
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001993 if (pdata && pdata->rsl_id) {
1994 struct remote_mutex_id rmid;
1995 rmid.r_spinlock_id = pdata->rsl_id;
1996 rmid.delay_us = SPI_TRYLOCK_DELAY;
1997
1998 rc = remote_mutex_init(&dd->r_lock, &rmid);
1999 if (rc) {
2000			dev_err(&pdev->dev, "%s: unable to init remote_mutex "
2001				"(%s), (rc=%d)\n", __func__,
2002				rmid.r_spinlock_id, rc);
2003 goto err_probe_rlock_init;
2004 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002005
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002006 dd->use_rlock = 1;
2007 dd->pm_lat = pdata->pm_lat;
Alok Chauhanb5f53792012-08-22 19:54:45 +05302008 pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
2009 PM_QOS_DEFAULT_VALUE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002010 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002011
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002012 mutex_lock(&dd->core_lock);
2013 if (dd->use_rlock)
2014 remote_mutex_lock(&dd->r_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002015
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002016 locked = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002017 dd->dev = &pdev->dev;
Matt Wagantallac294852011-08-17 15:44:58 -07002018 dd->clk = clk_get(&pdev->dev, "core_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002019 if (IS_ERR(dd->clk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002020 dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002021 rc = PTR_ERR(dd->clk);
2022 goto err_probe_clk_get;
2023 }
2024
Matt Wagantallac294852011-08-17 15:44:58 -07002025 dd->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002026 if (IS_ERR(dd->pclk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002027 dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002028 rc = PTR_ERR(dd->pclk);
2029 goto err_probe_pclk_get;
2030 }
2031
2032 if (pdata && pdata->max_clock_speed)
2033 msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
2034
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002035 rc = clk_prepare_enable(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002036 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002037 dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002038 __func__);
2039 goto err_probe_clk_enable;
2040 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002041
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002042 clk_enabled = 1;
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002043 rc = clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002044 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002045 dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002046 __func__);
2047 goto err_probe_pclk_enable;
2048 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002049
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002050 pclk_enabled = 1;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002051 rc = msm_spi_configure_gsbi(dd, pdev);
2052 if (rc)
2053 goto err_probe_gsbi;
2054
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002055 msm_spi_calculate_fifo_size(dd);
2056 if (dd->use_dma) {
2057 rc = msm_spi_init_dma(dd);
2058 if (rc)
2059 goto err_probe_dma;
2060 }
2061
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002062 msm_spi_register_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002063 /*
2064 * The SPI core generates a bogus input overrun error on some targets,
2065 * when a transition from run to reset state occurs and if the FIFO has
2066 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
2067 * bit.
2068 */
2069 msm_spi_enable_error_flags(dd);
2070
2071 writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
2072 rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
2073 if (rc)
2074 goto err_probe_state;
2075
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002076 clk_disable_unprepare(dd->clk);
2077 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002078 clk_enabled = 0;
2079 pclk_enabled = 0;
2080
2081 dd->suspended = 0;
2082 dd->transfer_pending = 0;
2083 dd->multi_xfr = 0;
2084 dd->mode = SPI_MODE_NONE;
2085
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002086 rc = msm_spi_request_irq(dd, pdev, master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002087 if (rc)
2088 goto err_probe_irq;
2089
2090 msm_spi_disable_irqs(dd);
2091 if (dd->use_rlock)
2092 remote_mutex_unlock(&dd->r_lock);
2093
2094 mutex_unlock(&dd->core_lock);
2095 locked = 0;
2096
2097 rc = spi_register_master(master);
2098 if (rc)
2099 goto err_probe_reg_master;
2100
2101 rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
2102 if (rc) {
2103	dev_err(&pdev->dev, "failed to create device attributes: %d\n", rc);
2104 goto err_attrs;
2105 }
2106
2107 spi_debugfs_init(dd);
Kiran Gunda2b285652012-07-30 13:22:39 +05302108
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002109 return 0;
2110
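	/*
	 * Error unwinding: each label below releases whatever was set up
	 * before the step that failed, in reverse order of acquisition.
	 */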
2111err_attrs:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002112 spi_unregister_master(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002113err_probe_reg_master:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002114err_probe_irq:
2115err_probe_state:
2116 msm_spi_teardown_dma(dd);
2117err_probe_dma:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002118err_probe_gsbi:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002119 if (pclk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002120 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002121err_probe_pclk_enable:
2122 if (clk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002123 clk_disable_unprepare(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002124err_probe_clk_enable:
2125 clk_put(dd->pclk);
2126err_probe_pclk_get:
2127 clk_put(dd->clk);
2128err_probe_clk_get:
2129 if (locked) {
2130 if (dd->use_rlock)
2131 remote_mutex_unlock(&dd->r_lock);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002132
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002133 mutex_unlock(&dd->core_lock);
2134 }
2135err_probe_rlock_init:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002136err_probe_reqmem:
2137 destroy_workqueue(dd->workqueue);
2138err_probe_workq:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002139err_probe_res:
2140 spi_master_put(master);
2141err_probe_exit:
2142 return rc;
2143}
2144
2145#ifdef CONFIG_PM
2146static int msm_spi_suspend(struct platform_device *pdev, pm_message_t state)
2147{
2148 struct spi_master *master = platform_get_drvdata(pdev);
2149 struct msm_spi *dd;
2150 unsigned long flags;
2151
2152 if (!master)
2153 goto suspend_exit;
2154 dd = spi_master_get_devdata(master);
2155 if (!dd)
2156 goto suspend_exit;
2157
2158 /* Make sure nothing is added to the queue while we're suspending */
2159 spin_lock_irqsave(&dd->queue_lock, flags);
2160 dd->suspended = 1;
2161 spin_unlock_irqrestore(&dd->queue_lock, flags);
2162
2163 /* Wait for transactions to end, or time out */
2164 wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002165
2166suspend_exit:
2167 return 0;
2168}
2169
2170static int msm_spi_resume(struct platform_device *pdev)
2171{
2172 struct spi_master *master = platform_get_drvdata(pdev);
2173 struct msm_spi *dd;
2174
2175 if (!master)
2176 goto resume_exit;
2177 dd = spi_master_get_devdata(master);
2178 if (!dd)
2179 goto resume_exit;
2180
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002181 dd->suspended = 0;
2182resume_exit:
2183 return 0;
2184}
2185#else
2186#define msm_spi_suspend NULL
2187#define msm_spi_resume NULL
2188#endif /* CONFIG_PM */
2189
2190static int __devexit msm_spi_remove(struct platform_device *pdev)
2191{
2192 struct spi_master *master = platform_get_drvdata(pdev);
2193 struct msm_spi *dd = spi_master_get_devdata(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002194
2195 pm_qos_remove_request(&qos_req_list);
2196 spi_debugfs_exit(dd);
2197 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
2198
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002199 msm_spi_teardown_dma(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002200
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002201 clk_put(dd->clk);
2202 clk_put(dd->pclk);
2203 destroy_workqueue(dd->workqueue);
2204	platform_set_drvdata(pdev, NULL);
2205 spi_unregister_master(master);
2206 spi_master_put(master);
2207
2208 return 0;
2209}
2210
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002211static struct of_device_id msm_spi_dt_match[] = {
2212 {
2213 .compatible = "qcom,spi-qup-v2",
2214 },
2215 {}
2216};
2217
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002218static struct platform_driver msm_spi_driver = {
2219 .driver = {
2220 .name = SPI_DRV_NAME,
2221 .owner = THIS_MODULE,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002222 .of_match_table = msm_spi_dt_match,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002223 },
2224 .suspend = msm_spi_suspend,
2225 .resume = msm_spi_resume,
2226	.remove = __devexit_p(msm_spi_remove),
2227};
2228
2229static int __init msm_spi_init(void)
2230{
2231 return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
2232}
2233module_init(msm_spi_init);
2234
2235static void __exit msm_spi_exit(void)
2236{
2237 platform_driver_unregister(&msm_spi_driver);
2238}
2239module_exit(msm_spi_exit);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002240
2241MODULE_LICENSE("GPL v2");
2242MODULE_VERSION("0.4");
2243MODULE_ALIAS("platform:"SPI_DRV_NAME);