1/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13/*
14 * SPI driver for Qualcomm MSM platforms
15 *
16 */
17#include <linux/version.h>
18#include <linux/kernel.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070019#include <linux/module.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070020#include <linux/init.h>
21#include <linux/spinlock.h>
22#include <linux/list.h>
23#include <linux/irq.h>
24#include <linux/platform_device.h>
25#include <linux/spi/spi.h>
26#include <linux/interrupt.h>
27#include <linux/err.h>
28#include <linux/clk.h>
29#include <linux/delay.h>
30#include <linux/workqueue.h>
31#include <linux/io.h>
32#include <linux/debugfs.h>
33#include <mach/msm_spi.h>
34#include <linux/dma-mapping.h>
35#include <linux/sched.h>
36#include <mach/dma.h>
37#include <asm/atomic.h>
38#include <linux/mutex.h>
39#include <linux/gpio.h>
40#include <linux/remote_spinlock.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070041#include <linux/pm_qos.h>
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070042#include <linux/of.h>
Sathish Ambleycd06bf32012-04-09 11:59:43 -070043#include <linux/of_gpio.h>
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070044#include "spi_qsd.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070045
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070046static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
47 struct platform_device *pdev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070048{
49 struct resource *resource;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070050 unsigned long gsbi_mem_phys_addr;
51 size_t gsbi_mem_size;
52 void __iomem *gsbi_base;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070053
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070054 resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070055 if (!resource)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070056 return 0;
57
58 gsbi_mem_phys_addr = resource->start;
59 gsbi_mem_size = resource_size(resource);
60 if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
61 gsbi_mem_size, SPI_DRV_NAME))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062 return -ENXIO;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070063
64 gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
65 gsbi_mem_size);
66 if (!gsbi_base)
67 return -ENXIO;
68
69 /* Set GSBI to SPI mode */
70 writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071
72 return 0;
73}
74
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070075static inline void msm_spi_register_init(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070076{
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070077 writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
78 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
79 writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
80 writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
81 writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
82 if (dd->qup_ver)
83 writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070084}
85
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070086static inline int msm_spi_request_gpios(struct msm_spi *dd)
87{
88 int i;
89 int result = 0;
90
91 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
92 if (dd->spi_gpios[i] >= 0) {
93 result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
94 if (result) {
Harini Jayaramane4c06192011-09-28 16:26:39 -060095 dev_err(dd->dev, "%s: gpio_request for pin %d "
96 "failed with error %d\n", __func__,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070097 dd->spi_gpios[i], result);
98 goto error;
99 }
100 }
101 }
102 return 0;
103
104error:
105 for (; --i >= 0;) {
106 if (dd->spi_gpios[i] >= 0)
107 gpio_free(dd->spi_gpios[i]);
108 }
109 return result;
110}
111
112static inline void msm_spi_free_gpios(struct msm_spi *dd)
113{
114 int i;
115
116 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
117 if (dd->spi_gpios[i] >= 0)
118 gpio_free(dd->spi_gpios[i]);
119 }
Harini Jayaramane4c06192011-09-28 16:26:39 -0600120
121 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
122 if (dd->cs_gpios[i].valid) {
123 gpio_free(dd->cs_gpios[i].gpio_num);
124 dd->cs_gpios[i].valid = 0;
125 }
126 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700127}
128
Gilad Avidov70af2152012-10-18 09:34:35 -0600129/**
130 * msm_spi_clk_max_rate: finds the nearest lower rate for a clk
131 * @clk: the clock for which to find the nearest lower rate
132 * @rate: desired clock frequency in Hz
133 * @return: nearest lower rate, or a negative error value
134 *
135 * The public clock API's clk_round_rate() is a ceiling function. This
136 * function is the corresponding floor function, implemented as a binary
137 * search on top of the ceiling function.
138 */
139static long msm_spi_clk_max_rate(struct clk *clk, unsigned long rate)
140{
141 long lowest_available, nearest_low, step_size, cur;
142 long step_direction = -1;
143 long guess = rate;
144 int max_steps = 10;
145
146 cur = clk_round_rate(clk, rate);
147 if (cur == rate)
148 return rate;
149
150 /* if we got here then: cur > rate */
151 lowest_available = clk_round_rate(clk, 0);
152 if (lowest_available > rate)
153 return -EINVAL;
154
155 step_size = (rate - lowest_available) >> 1;
156 nearest_low = lowest_available;
157
158 while (max_steps-- && step_size) {
159 guess += step_size * step_direction;
160
161 cur = clk_round_rate(clk, guess);
162
163 if ((cur < rate) && (cur > nearest_low))
164 nearest_low = cur;
165
166 /*
167 * if we stepped too far, then start stepping in the other
168 * direction with half the step size
169 */
170 if (((cur > rate) && (step_direction > 0))
171 || ((cur < rate) && (step_direction < 0))) {
172 step_direction = -step_direction;
173 step_size >>= 1;
174 }
175 }
176 return nearest_low;
177}
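/*
 * Illustrative walk-through of the floor search above (hypothetical
 * rates, not taken from any real clock plan): if clk_round_rate() can
 * only return 9.6 MHz or 13.5 MHz and a caller asks for 12 MHz, the
 * ceiling result of 13.5 MHz is rejected and the binary search settles
 * on 9.6 MHz, the nearest rate that does not exceed the request.
 */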
178
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700179static void msm_spi_clock_set(struct msm_spi *dd, int speed)
180{
Gilad Avidov70af2152012-10-18 09:34:35 -0600181 long rate;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700182 int rc;
183
Gilad Avidov70af2152012-10-18 09:34:35 -0600184 rate = msm_spi_clk_max_rate(dd->clk, speed);
185 if (rate < 0) {
186 dev_err(dd->dev,
187 "%s: no match found for requested clock frequency: %d\n",
188 __func__, speed);
189 return;
190 }
191
192 rc = clk_set_rate(dd->clk, rate);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700193 if (!rc)
Gilad Avidov70af2152012-10-18 09:34:35 -0600194 dd->clock_speed = rate;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700195}
196
197static int msm_spi_calculate_size(int *fifo_size,
198 int *block_size,
199 int block,
200 int mult)
201{
202 int words;
203
204 switch (block) {
205 case 0:
206 words = 1; /* 4 bytes */
207 break;
208 case 1:
209 words = 4; /* 16 bytes */
210 break;
211 case 2:
212 words = 8; /* 32 bytes */
213 break;
214 default:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700215 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700216 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700217
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700218 switch (mult) {
219 case 0:
220 *fifo_size = words * 2;
221 break;
222 case 1:
223 *fifo_size = words * 4;
224 break;
225 case 2:
226 *fifo_size = words * 8;
227 break;
228 case 3:
229 *fifo_size = words * 16;
230 break;
231 default:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700232 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700233 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700234
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700235 *block_size = words * sizeof(u32); /* in bytes */
236 return 0;
237}
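/*
 * Worked example using the encodings above: block = 1 gives words = 4
 * (a 16-byte block), and mult = 2 then gives *fifo_size = 4 * 8 = 32
 * words, i.e. a 16-byte block size and a 128-byte FIFO.
 */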
238
239static void get_next_transfer(struct msm_spi *dd)
240{
241 struct spi_transfer *t = dd->cur_transfer;
242
243 if (t->transfer_list.next != &dd->cur_msg->transfers) {
244 dd->cur_transfer = list_entry(t->transfer_list.next,
245 struct spi_transfer,
246 transfer_list);
247 dd->write_buf = dd->cur_transfer->tx_buf;
248 dd->read_buf = dd->cur_transfer->rx_buf;
249 }
250}
251
252static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
253{
254 u32 spi_iom;
255 int block;
256 int mult;
257
258 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
259
260 block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
261 mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
262 if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
263 block, mult)) {
264 goto fifo_size_err;
265 }
266
267 block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
268 mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
269 if (msm_spi_calculate_size(&dd->output_fifo_size,
270 &dd->output_block_size, block, mult)) {
271 goto fifo_size_err;
272 }
273 /* DM mode is not available for this block size */
274 if (dd->input_block_size == 4 || dd->output_block_size == 4)
275 dd->use_dma = 0;
276
Alok Chauhanc27843e2013-02-15 16:04:20 +0530277 if (dd->use_dma) {
278 dd->input_burst_size = max(dd->input_block_size,
279 DM_BURST_SIZE);
280 dd->output_burst_size = max(dd->output_block_size,
281 DM_BURST_SIZE);
282 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700283 return;
284
285fifo_size_err:
286 dd->use_dma = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700287 pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700288 return;
289}
290
291static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
292{
293 u32 data_in;
294 int i;
295 int shift;
296
297 data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
298 if (dd->read_buf) {
299 for (i = 0; (i < dd->bytes_per_word) &&
300 dd->rx_bytes_remaining; i++) {
301 /* The data format depends on bytes_per_word:
302 4 bytes: 0x12345678
303 3 bytes: 0x00123456
304 2 bytes: 0x00001234
305 1 byte : 0x00000012
306 */
307 shift = 8 * (dd->bytes_per_word - i - 1);
308 *dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
309 dd->rx_bytes_remaining--;
310 }
311 } else {
312 if (dd->rx_bytes_remaining >= dd->bytes_per_word)
313 dd->rx_bytes_remaining -= dd->bytes_per_word;
314 else
315 dd->rx_bytes_remaining = 0;
316 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700317
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700318 dd->read_xfr_cnt++;
319 if (dd->multi_xfr) {
320 if (!dd->rx_bytes_remaining)
321 dd->read_xfr_cnt = 0;
322 else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
323 dd->read_len) {
324 struct spi_transfer *t = dd->cur_rx_transfer;
325 if (t->transfer_list.next != &dd->cur_msg->transfers) {
326 t = list_entry(t->transfer_list.next,
327 struct spi_transfer,
328 transfer_list);
329 dd->read_buf = t->rx_buf;
330 dd->read_len = t->len;
331 dd->read_xfr_cnt = 0;
332 dd->cur_rx_transfer = t;
333 }
334 }
335 }
336}
337
338static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
339{
340 u32 spi_op = readl_relaxed(dd->base + SPI_STATE);
341
342 return spi_op & SPI_OP_STATE_VALID;
343}
344
Sagar Dharia2840b0a2012-11-02 18:26:01 -0600345static inline void msm_spi_udelay(unsigned long delay_usecs)
346{
347 /*
348 * For small delay values, the context-switch overhead of usleep
349 * would exceed the delay itself, so busy-wait instead
350 */
351 if (delay_usecs > 20)
352 usleep_range(delay_usecs, delay_usecs);
353 else if (delay_usecs)
354 udelay(delay_usecs);
355}
356
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700357static inline int msm_spi_wait_valid(struct msm_spi *dd)
358{
359 unsigned long delay = 0;
360 unsigned long timeout = 0;
361
362 if (dd->clock_speed == 0)
363 return -EINVAL;
364 /*
365 * Based on the SPI clock speed, sufficient time
366 * should be given for the SPI state transition
367 * to occur
368 */
369 delay = (10 * USEC_PER_SEC) / dd->clock_speed;
370 /*
371 * For small delay values, the default timeout would
372 * be one jiffy
373 */
374 if (delay < SPI_DELAY_THRESHOLD)
375 delay = SPI_DELAY_THRESHOLD;
Harini Jayaramanbcef7732011-09-01 12:12:58 -0600376
377 /* Adding one to round off to the nearest jiffy */
378 timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700379 while (!msm_spi_is_valid_state(dd)) {
380 if (time_after(jiffies, timeout)) {
Harini Jayaramanbcef7732011-09-01 12:12:58 -0600381 if (!msm_spi_is_valid_state(dd)) {
382 if (dd->cur_msg)
383 dd->cur_msg->status = -EIO;
384 dev_err(dd->dev, "%s: SPI operational state not valid\n",
385 __func__);
386 return -ETIMEDOUT;
387 } else
388 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700389 }
Sagar Dharia2840b0a2012-11-02 18:26:01 -0600390 msm_spi_udelay(delay);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700391 }
392 return 0;
393}
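/*
 * Example of the wait arithmetic above (illustrative clock value): at a
 * 10 MHz SPI clock, delay = (10 * USEC_PER_SEC) / 10000000 = 1 us; if
 * that is below SPI_DELAY_THRESHOLD it is raised to the threshold, and
 * the result is scaled by SPI_DEFAULT_TIMEOUT to build the jiffies
 * deadline used in the polling loop.
 */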
394
395static inline int msm_spi_set_state(struct msm_spi *dd,
396 enum msm_spi_state state)
397{
398 enum msm_spi_state cur_state;
399 if (msm_spi_wait_valid(dd))
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700400 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700401 cur_state = readl_relaxed(dd->base + SPI_STATE);
402 /* Per spec:
403 For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
404 if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
405 (state == SPI_OP_STATE_RESET)) {
406 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
407 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
408 } else {
409 writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
410 dd->base + SPI_STATE);
411 }
412 if (msm_spi_wait_valid(dd))
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700413 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700414
415 return 0;
416}
417
418static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n)
419{
420 *config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);
421
422 if (n != (*config & SPI_CFG_N))
423 *config = (*config & ~SPI_CFG_N) | n;
424
425 if ((dd->mode == SPI_DMOV_MODE) && (!dd->read_len)) {
426 if (dd->read_buf == NULL)
427 *config |= SPI_NO_INPUT;
428 if (dd->write_buf == NULL)
429 *config |= SPI_NO_OUTPUT;
430 }
431}
432
433static void msm_spi_set_config(struct msm_spi *dd, int bpw)
434{
435 u32 spi_config;
436
437 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
438
439 if (dd->cur_msg->spi->mode & SPI_CPHA)
440 spi_config &= ~SPI_CFG_INPUT_FIRST;
441 else
442 spi_config |= SPI_CFG_INPUT_FIRST;
443 if (dd->cur_msg->spi->mode & SPI_LOOP)
444 spi_config |= SPI_CFG_LOOPBACK;
445 else
446 spi_config &= ~SPI_CFG_LOOPBACK;
447 msm_spi_add_configs(dd, &spi_config, bpw-1);
448 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
449 msm_spi_set_qup_config(dd, bpw);
450}
451
452static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
453{
454 dmov_box *box;
Alok Chauhanc27843e2013-02-15 16:04:20 +0530455 int bytes_to_send, bytes_sent;
456 int tx_num_rows, rx_num_rows;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700457 u32 num_transfers;
458
459 atomic_set(&dd->rx_irq_called, 0);
Kiran Gunda54eb06e2012-05-18 15:17:06 +0530460 atomic_set(&dd->tx_irq_called, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700461 if (dd->write_len && !dd->read_len) {
462 /* WR-WR transfer */
463 bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
464 dd->write_buf = dd->temp_buf;
465 } else {
466 bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
467 /* For WR-RD transfer, bytes_sent can be negative */
468 if (bytes_sent < 0)
469 bytes_sent = 0;
470 }
471 /* For targets that have only 12 bits in the QUP_MAX_OUTPUT_CNT
472 * register, transfers larger than 4K bytes are sent in chunks of
473 * SPI_MAX_LEN. If the target supports more than 12 bits, then the
474 * data is sent in chunks of the infinite_mode value that is defined
475 * in the corresponding board file.
476 *
Kiran Gundae8f16742012-06-27 10:06:32 +0530477 */
478 if (!dd->pdata->infinite_mode)
Kiran Gunda2b285652012-07-30 13:22:39 +0530479 dd->max_trfr_len = SPI_MAX_LEN;
Kiran Gundae8f16742012-06-27 10:06:32 +0530480 else
Kiran Gunda2b285652012-07-30 13:22:39 +0530481 dd->max_trfr_len = (dd->pdata->infinite_mode) *
482 (dd->bytes_per_word);
483
484 bytes_to_send = min_t(u32, dd->tx_bytes_remaining,
485 dd->max_trfr_len);
Kiran Gundae8f16742012-06-27 10:06:32 +0530486
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700487 num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
Alok Chauhanc27843e2013-02-15 16:04:20 +0530488 dd->tx_unaligned_len = bytes_to_send % dd->output_burst_size;
489 dd->rx_unaligned_len = bytes_to_send % dd->input_burst_size;
490 tx_num_rows = bytes_to_send / dd->output_burst_size;
491 rx_num_rows = bytes_to_send / dd->input_burst_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700492
493 dd->mode = SPI_DMOV_MODE;
494
Alok Chauhanc27843e2013-02-15 16:04:20 +0530495 if (tx_num_rows) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700496 /* src in 16 MSB, dst in 16 LSB */
497 box = &dd->tx_dmov_cmd->box;
498 box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
Alok Chauhanc27843e2013-02-15 16:04:20 +0530499 box->src_dst_len
500 = (dd->output_burst_size << 16) | dd->output_burst_size;
501 box->num_rows = (tx_num_rows << 16) | tx_num_rows;
502 box->row_offset = (dd->output_burst_size << 16) | 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700503
Alok Chauhanc27843e2013-02-15 16:04:20 +0530504 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
505 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
506 offsetof(struct spi_dmov_cmd, box));
507 } else {
508 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
509 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
510 offsetof(struct spi_dmov_cmd, single_pad));
511 }
512
513 if (rx_num_rows) {
514 /* src in 16 MSB, dst in 16 LSB */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700515 box = &dd->rx_dmov_cmd->box;
516 box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
Alok Chauhanc27843e2013-02-15 16:04:20 +0530517 box->src_dst_len
518 = (dd->input_burst_size << 16) | dd->input_burst_size;
519 box->num_rows = (rx_num_rows << 16) | rx_num_rows;
520 box->row_offset = (0 << 16) | dd->input_burst_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700521
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700522 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
523 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
524 offsetof(struct spi_dmov_cmd, box));
525 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700526 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
527 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
528 offsetof(struct spi_dmov_cmd, single_pad));
529 }
530
Alok Chauhanc27843e2013-02-15 16:04:20 +0530531 if (!dd->tx_unaligned_len) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700532 dd->tx_dmov_cmd->box.cmd |= CMD_LC;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700533 } else {
534 dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
Alok Chauhanc27843e2013-02-15 16:04:20 +0530535 u32 tx_offset = dd->cur_transfer->len - dd->tx_unaligned_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700536
537 if ((dd->multi_xfr) && (dd->read_len <= 0))
Alok Chauhanc27843e2013-02-15 16:04:20 +0530538 tx_offset = dd->cur_msg_len - dd->tx_unaligned_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700539
540 dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700541
Alok Chauhanc27843e2013-02-15 16:04:20 +0530542 memset(dd->tx_padding, 0, dd->output_burst_size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700543 if (dd->write_buf)
Alok Chauhanc27843e2013-02-15 16:04:20 +0530544 memcpy(dd->tx_padding, dd->write_buf + tx_offset,
545 dd->tx_unaligned_len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700546
547 tx_cmd->src = dd->tx_padding_dma;
Alok Chauhanc27843e2013-02-15 16:04:20 +0530548 tx_cmd->len = dd->output_burst_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700549 }
Alok Chauhanc27843e2013-02-15 16:04:20 +0530550
551 if (!dd->rx_unaligned_len) {
552 dd->rx_dmov_cmd->box.cmd |= CMD_LC;
553 } else {
554 dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
555 dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;
556
557 memset(dd->rx_padding, 0, dd->input_burst_size);
558 rx_cmd->dst = dd->rx_padding_dma;
559 rx_cmd->len = dd->input_burst_size;
560 }
561
562 /* This also takes care of the padding dummy buf.
563 Since this is set to the correct length, the
564 dummy bytes won't actually be sent */
565 if (dd->multi_xfr) {
566 u32 write_transfers = 0;
567 u32 read_transfers = 0;
568
569 if (dd->write_len > 0) {
570 write_transfers = DIV_ROUND_UP(dd->write_len,
571 dd->bytes_per_word);
572 writel_relaxed(write_transfers,
573 dd->base + SPI_MX_OUTPUT_COUNT);
574 }
575 if (dd->read_len > 0) {
576 /*
577 * The read following a write transfer must take
578 * into account, that the bytes pertaining to
579 * the write transfer needs to be discarded,
580 * before the actual read begins.
581 */
582 read_transfers = DIV_ROUND_UP(dd->read_len +
583 dd->write_len,
584 dd->bytes_per_word);
585 writel_relaxed(read_transfers,
586 dd->base + SPI_MX_INPUT_COUNT);
587 }
588 } else {
589 if (dd->write_buf)
590 writel_relaxed(num_transfers,
591 dd->base + SPI_MX_OUTPUT_COUNT);
592 if (dd->read_buf)
593 writel_relaxed(num_transfers,
594 dd->base + SPI_MX_INPUT_COUNT);
595 }
596}
597
598static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
599{
600 dma_coherent_pre_ops();
601 if (dd->write_buf)
602 msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
603 if (dd->read_buf)
604 msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
605}
606
607/* The SPI core on targets that do not support infinite mode can send a
608 maximum of 4K or 64K transfers, depending on the size of the
609 MAX_OUTPUT_COUNT register. Therefore, we send the data in several
610 chunks. Upon completion we send the next chunk, or complete the
611 transfer if everything is finished. On targets that support
612 infinite mode, we send all the bytes in a single chunk.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700613*/
614static int msm_spi_dm_send_next(struct msm_spi *dd)
615{
616 /* By now we should have sent all the bytes in FIFO mode,
617 * However to make things right, we'll check anyway.
618 */
619 if (dd->mode != SPI_DMOV_MODE)
620 return 0;
621
622 /* On targets which do not support infinite mode,
623 we need to send more chunks if we sent the maximum last time */
Kiran Gunda2b285652012-07-30 13:22:39 +0530624 if (dd->tx_bytes_remaining > dd->max_trfr_len) {
625 dd->tx_bytes_remaining -= dd->max_trfr_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700626 if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
627 return 0;
628 dd->read_len = dd->write_len = 0;
629 msm_spi_setup_dm_transfer(dd);
630 msm_spi_enqueue_dm_commands(dd);
631 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
632 return 0;
633 return 1;
634 } else if (dd->read_len && dd->write_len) {
635 dd->tx_bytes_remaining -= dd->cur_transfer->len;
636 if (list_is_last(&dd->cur_transfer->transfer_list,
637 &dd->cur_msg->transfers))
638 return 0;
639 get_next_transfer(dd);
640 if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
641 return 0;
642 dd->tx_bytes_remaining = dd->read_len + dd->write_len;
643 dd->read_buf = dd->temp_buf;
644 dd->read_len = dd->write_len = -1;
645 msm_spi_setup_dm_transfer(dd);
646 msm_spi_enqueue_dm_commands(dd);
647 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
648 return 0;
649 return 1;
650 }
651 return 0;
652}
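/*
 * Chunking illustration (hypothetical sizes, not from any real board
 * file): with a max_trfr_len of 4000 bytes, a 10240-byte transfer goes
 * out as 4000 + 4000 + 2240 bytes; msm_spi_dm_send_next() above re-arms
 * the data mover for each chunk until tx_bytes_remaining no longer
 * exceeds max_trfr_len.
 */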
653
654static inline void msm_spi_ack_transfer(struct msm_spi *dd)
655{
656 writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
657 SPI_OP_MAX_OUTPUT_DONE_FLAG,
658 dd->base + SPI_OPERATIONAL);
659 /* Ensure done flag was cleared before proceeding further */
660 mb();
661}
662
663/* Figure out which irq occurred and call the relevant functions */
664static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
665{
666 u32 op, ret = IRQ_NONE;
667 struct msm_spi *dd = dev_id;
668
669 if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
670 readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
671 struct spi_master *master = dev_get_drvdata(dd->dev);
672 ret |= msm_spi_error_irq(irq, master);
673 }
674
675 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
676 if (op & SPI_OP_INPUT_SERVICE_FLAG) {
677 writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
678 dd->base + SPI_OPERATIONAL);
679 /*
680 * Ensure service flag was cleared before further
681 * processing of interrupt.
682 */
683 mb();
684 ret |= msm_spi_input_irq(irq, dev_id);
685 }
686
687 if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
688 writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
689 dd->base + SPI_OPERATIONAL);
690 /*
691 * Ensure service flag was cleared before further
692 * processing of interrupt.
693 */
694 mb();
695 ret |= msm_spi_output_irq(irq, dev_id);
696 }
697
698 if (dd->done) {
699 complete(&dd->transfer_complete);
700 dd->done = 0;
701 }
702 return ret;
703}
704
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700705static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
706{
707 struct msm_spi *dd = dev_id;
708
709 dd->stat_rx++;
710
711 if (dd->mode == SPI_MODE_NONE)
712 return IRQ_HANDLED;
713
714 if (dd->mode == SPI_DMOV_MODE) {
715 u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
716 if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
717 (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
718 msm_spi_ack_transfer(dd);
Alok Chauhanc27843e2013-02-15 16:04:20 +0530719 if (dd->rx_unaligned_len == 0) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700720 if (atomic_inc_return(&dd->rx_irq_called) == 1)
721 return IRQ_HANDLED;
722 }
723 msm_spi_complete(dd);
724 return IRQ_HANDLED;
725 }
726 return IRQ_NONE;
727 }
728
729 if (dd->mode == SPI_FIFO_MODE) {
730 while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
731 SPI_OP_IP_FIFO_NOT_EMPTY) &&
732 (dd->rx_bytes_remaining > 0)) {
733 msm_spi_read_word_from_fifo(dd);
734 }
735 if (dd->rx_bytes_remaining == 0)
736 msm_spi_complete(dd);
737 }
738
739 return IRQ_HANDLED;
740}
741
742static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
743{
744 u32 word;
745 u8 byte;
746 int i;
747
748 word = 0;
749 if (dd->write_buf) {
750 for (i = 0; (i < dd->bytes_per_word) &&
751 dd->tx_bytes_remaining; i++) {
752 dd->tx_bytes_remaining--;
753 byte = *dd->write_buf++;
754 word |= (byte << (BITS_PER_BYTE * (3 - i)));
755 }
756 } else
757 if (dd->tx_bytes_remaining > dd->bytes_per_word)
758 dd->tx_bytes_remaining -= dd->bytes_per_word;
759 else
760 dd->tx_bytes_remaining = 0;
761 dd->write_xfr_cnt++;
762 if (dd->multi_xfr) {
763 if (!dd->tx_bytes_remaining)
764 dd->write_xfr_cnt = 0;
765 else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
766 dd->write_len) {
767 struct spi_transfer *t = dd->cur_tx_transfer;
768 if (t->transfer_list.next != &dd->cur_msg->transfers) {
769 t = list_entry(t->transfer_list.next,
770 struct spi_transfer,
771 transfer_list);
772 dd->write_buf = t->tx_buf;
773 dd->write_len = t->len;
774 dd->write_xfr_cnt = 0;
775 dd->cur_tx_transfer = t;
776 }
777 }
778 }
779 writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
780}
781
782static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
783{
784 int count = 0;
785
786 while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
787 !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
788 SPI_OP_OUTPUT_FIFO_FULL)) {
789 msm_spi_write_word_to_fifo(dd);
790 count++;
791 }
792}
793
794static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
795{
796 struct msm_spi *dd = dev_id;
797
798 dd->stat_tx++;
799
800 if (dd->mode == SPI_MODE_NONE)
801 return IRQ_HANDLED;
802
803 if (dd->mode == SPI_DMOV_MODE) {
804 /* TX_ONLY transaction is handled here.
805 This is the only place we signal completion at tx and not rx */
806 if (dd->read_buf == NULL &&
807 readl_relaxed(dd->base + SPI_OPERATIONAL) &
808 SPI_OP_MAX_OUTPUT_DONE_FLAG) {
809 msm_spi_ack_transfer(dd);
Kiran Gunda54eb06e2012-05-18 15:17:06 +0530810 if (atomic_inc_return(&dd->tx_irq_called) == 1)
811 return IRQ_HANDLED;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700812 msm_spi_complete(dd);
813 return IRQ_HANDLED;
814 }
815 return IRQ_NONE;
816 }
817
818 /* Output FIFO is empty. Transmit any outstanding write data. */
819 if (dd->mode == SPI_FIFO_MODE)
820 msm_spi_write_rmn_to_fifo(dd);
821
822 return IRQ_HANDLED;
823}
824
825static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
826{
827 struct spi_master *master = dev_id;
828 struct msm_spi *dd = spi_master_get_devdata(master);
829 u32 spi_err;
830
831 spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
832 if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
833 dev_warn(master->dev.parent, "SPI output overrun error\n");
834 if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
835 dev_warn(master->dev.parent, "SPI input underrun error\n");
836 if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
837 dev_warn(master->dev.parent, "SPI output underrun error\n");
838 msm_spi_get_clk_err(dd, &spi_err);
839 if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
840 dev_warn(master->dev.parent, "SPI clock overrun error\n");
841 if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
842 dev_warn(master->dev.parent, "SPI clock underrun error\n");
843 msm_spi_clear_error_flags(dd);
844 msm_spi_ack_clk_err(dd);
845 /* Ensure clearing of QUP_ERROR_FLAGS was completed */
846 mb();
847 return IRQ_HANDLED;
848}
849
850static int msm_spi_map_dma_buffers(struct msm_spi *dd)
851{
852 struct device *dev;
853 struct spi_transfer *first_xfr;
Jordan Crouse47b3f832011-09-19 11:21:16 -0600854 struct spi_transfer *nxt_xfr = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700855 void *tx_buf, *rx_buf;
856 unsigned tx_len, rx_len;
857 int ret = -EINVAL;
858
859 dev = &dd->cur_msg->spi->dev;
860 first_xfr = dd->cur_transfer;
861 tx_buf = (void *)first_xfr->tx_buf;
862 rx_buf = first_xfr->rx_buf;
863 tx_len = rx_len = first_xfr->len;
864
865 /*
866 * For WR-WR and WR-RD transfers, we allocate our own temporary
867 * buffer and copy the data to/from the client buffers.
868 */
869 if (dd->multi_xfr) {
870 dd->temp_buf = kzalloc(dd->cur_msg_len,
871 GFP_KERNEL | __GFP_DMA);
872 if (!dd->temp_buf)
873 return -ENOMEM;
874 nxt_xfr = list_entry(first_xfr->transfer_list.next,
875 struct spi_transfer, transfer_list);
876
877 if (dd->write_len && !dd->read_len) {
878 if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
879 goto error;
880
881 memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
882 memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
883 nxt_xfr->len);
884 tx_buf = dd->temp_buf;
885 tx_len = dd->cur_msg_len;
886 } else {
887 if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
888 goto error;
889
890 rx_buf = dd->temp_buf;
891 rx_len = dd->cur_msg_len;
892 }
893 }
894 if (tx_buf != NULL) {
895 first_xfr->tx_dma = dma_map_single(dev, tx_buf,
896 tx_len, DMA_TO_DEVICE);
897 if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
898 dev_err(dev, "dma %cX %d bytes error\n",
899 'T', tx_len);
900 ret = -ENOMEM;
901 goto error;
902 }
903 }
904 if (rx_buf != NULL) {
905 dma_addr_t dma_handle;
906 dma_handle = dma_map_single(dev, rx_buf,
907 rx_len, DMA_FROM_DEVICE);
908 if (dma_mapping_error(NULL, dma_handle)) {
909 dev_err(dev, "dma %cX %d bytes error\n",
910 'R', rx_len);
911 if (tx_buf != NULL)
912 dma_unmap_single(NULL, first_xfr->tx_dma,
913 tx_len, DMA_TO_DEVICE);
914 ret = -ENOMEM;
915 goto error;
916 }
917 if (dd->multi_xfr)
918 nxt_xfr->rx_dma = dma_handle;
919 else
920 first_xfr->rx_dma = dma_handle;
921 }
922 return 0;
923
924error:
925 kfree(dd->temp_buf);
926 dd->temp_buf = NULL;
927 return ret;
928}
929
930static void msm_spi_unmap_dma_buffers(struct msm_spi *dd)
931{
932 struct device *dev;
933 u32 offset;
934
935 dev = &dd->cur_msg->spi->dev;
936 if (dd->cur_msg->is_dma_mapped)
937 goto unmap_end;
938
939 if (dd->multi_xfr) {
940 if (dd->write_len && !dd->read_len) {
941 dma_unmap_single(dev,
942 dd->cur_transfer->tx_dma,
943 dd->cur_msg_len,
944 DMA_TO_DEVICE);
945 } else {
946 struct spi_transfer *prev_xfr;
947 prev_xfr = list_entry(
948 dd->cur_transfer->transfer_list.prev,
949 struct spi_transfer,
950 transfer_list);
951 if (dd->cur_transfer->rx_buf) {
952 dma_unmap_single(dev,
953 dd->cur_transfer->rx_dma,
954 dd->cur_msg_len,
955 DMA_FROM_DEVICE);
956 }
957 if (prev_xfr->tx_buf) {
958 dma_unmap_single(dev,
959 prev_xfr->tx_dma,
960 prev_xfr->len,
961 DMA_TO_DEVICE);
962 }
Alok Chauhanc27843e2013-02-15 16:04:20 +0530963 if (dd->rx_unaligned_len && dd->read_buf) {
964 offset = dd->cur_msg_len - dd->rx_unaligned_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700965 dma_coherent_post_ops();
966 memcpy(dd->read_buf + offset, dd->rx_padding,
Alok Chauhanc27843e2013-02-15 16:04:20 +0530967 dd->rx_unaligned_len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700968 memcpy(dd->cur_transfer->rx_buf,
969 dd->read_buf + prev_xfr->len,
970 dd->cur_transfer->len);
971 }
972 }
973 kfree(dd->temp_buf);
974 dd->temp_buf = NULL;
975 return;
976 } else {
977 if (dd->cur_transfer->rx_buf)
978 dma_unmap_single(dev, dd->cur_transfer->rx_dma,
979 dd->cur_transfer->len,
980 DMA_FROM_DEVICE);
981 if (dd->cur_transfer->tx_buf)
982 dma_unmap_single(dev, dd->cur_transfer->tx_dma,
983 dd->cur_transfer->len,
984 DMA_TO_DEVICE);
985 }
986
987unmap_end:
988 /* If we padded the transfer, we copy it from the padding buf */
Alok Chauhanc27843e2013-02-15 16:04:20 +0530989 if (dd->rx_unaligned_len && dd->read_buf) {
990 offset = dd->cur_transfer->len - dd->rx_unaligned_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700991 dma_coherent_post_ops();
992 memcpy(dd->read_buf + offset, dd->rx_padding,
Alok Chauhanc27843e2013-02-15 16:04:20 +0530993 dd->rx_unaligned_len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700994 }
995}
996
997/**
998 * msm_use_dm - decides whether to use data mover for this
999 * transfer
1000 * @dd: device
1001 * @tr: transfer
1002 *
1003 * Start using DM if:
1004 * 1. The transfer is longer than 3 * block size.
1005 * 2. The buffers are cache-line aligned.
1006 * 3. For WR-RD or WR-WR transfers, conditions (1) and (2) above are met.
1007 */
1008static inline int msm_use_dm(struct msm_spi *dd, struct spi_transfer *tr,
1009 u8 bpw)
1010{
1011 u32 cache_line = dma_get_cache_alignment();
1012
1013 if (!dd->use_dma)
1014 return 0;
1015
1016 if (dd->cur_msg_len < 3*dd->input_block_size)
1017 return 0;
1018
1019 if (dd->multi_xfr && !dd->read_len && !dd->write_len)
1020 return 0;
1021
1022 if (tr->tx_buf) {
1023 if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
1024 return 0;
1025 }
1026 if (tr->rx_buf) {
1027 if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
1028 return 0;
1029 }
1030
1031 if (tr->cs_change &&
1032 ((bpw != 8) && (bpw != 16) && (bpw != 32)))
1033 return 0;
1034 return 1;
1035}
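/*
 * Example of the policy above (assumed block size): with a 16-byte
 * input block, a 64-byte cache-line-aligned transfer (>= 3 * 16 bytes)
 * is eligible for the data mover, while a 32-byte or unaligned transfer
 * falls back to FIFO mode.
 */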
1036
1037static void msm_spi_process_transfer(struct msm_spi *dd)
1038{
1039 u8 bpw;
1040 u32 spi_ioc;
1041 u32 spi_iom;
1042 u32 spi_ioc_orig;
1043 u32 max_speed;
1044 u32 chip_select;
1045 u32 read_count;
1046 u32 timeout;
1047 u32 int_loopback = 0;
1048
1049 dd->tx_bytes_remaining = dd->cur_msg_len;
1050 dd->rx_bytes_remaining = dd->cur_msg_len;
1051 dd->read_buf = dd->cur_transfer->rx_buf;
1052 dd->write_buf = dd->cur_transfer->tx_buf;
1053 init_completion(&dd->transfer_complete);
1054 if (dd->cur_transfer->bits_per_word)
1055 bpw = dd->cur_transfer->bits_per_word;
1056 else
1057 if (dd->cur_msg->spi->bits_per_word)
1058 bpw = dd->cur_msg->spi->bits_per_word;
1059 else
1060 bpw = 8;
1061 dd->bytes_per_word = (bpw + 7) / 8;
1062
1063 if (dd->cur_transfer->speed_hz)
1064 max_speed = dd->cur_transfer->speed_hz;
1065 else
1066 max_speed = dd->cur_msg->spi->max_speed_hz;
1067 if (!dd->clock_speed || max_speed != dd->clock_speed)
1068 msm_spi_clock_set(dd, max_speed);
1069
1070 read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
1071 if (dd->cur_msg->spi->mode & SPI_LOOP)
1072 int_loopback = 1;
1073 if (int_loopback && dd->multi_xfr &&
1074 (read_count > dd->input_fifo_size)) {
1075 if (dd->read_len && dd->write_len)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001076 pr_err(
1077 "%s: Internal loopback does not support > fifo size"
1078 " for write-then-read transactions\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001079 __func__);
1080 else if (dd->write_len && !dd->read_len)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001081 pr_err(
1082 "%s: Internal loopback does not support > fifo size"
1083 " for write-then-write transactions\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001084 __func__);
1085 return;
1086 }
1087 if (!msm_use_dm(dd, dd->cur_transfer, bpw)) {
1088 dd->mode = SPI_FIFO_MODE;
1089 if (dd->multi_xfr) {
1090 dd->read_len = dd->cur_transfer->len;
1091 dd->write_len = dd->cur_transfer->len;
1092 }
1093 /* read_count cannot exceed fifo_size, and only one READ COUNT
1094 interrupt is generated per transaction, so for transactions
1095 larger than fifo size READ COUNT must be disabled.
1096 For those transactions we usually move to Data Mover mode.
1097 */
1098 if (read_count <= dd->input_fifo_size) {
1099 writel_relaxed(read_count,
1100 dd->base + SPI_MX_READ_COUNT);
1101 msm_spi_set_write_count(dd, read_count);
1102 } else {
1103 writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
1104 msm_spi_set_write_count(dd, 0);
1105 }
1106 } else {
1107 dd->mode = SPI_DMOV_MODE;
1108 if (dd->write_len && dd->read_len) {
1109 dd->tx_bytes_remaining = dd->write_len;
1110 dd->rx_bytes_remaining = dd->read_len;
1111 }
1112 }
1113
1114 /* Write mode - fifo or data mover*/
1115 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
1116 spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
1117 spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
1118 spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
1119 /* Turn on packing for data mover */
1120 if (dd->mode == SPI_DMOV_MODE)
1121 spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
1122 else
1123 spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
1124 writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
1125
1126 msm_spi_set_config(dd, bpw);
1127
1128 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1129 spi_ioc_orig = spi_ioc;
1130 if (dd->cur_msg->spi->mode & SPI_CPOL)
1131 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1132 else
1133 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1134 chip_select = dd->cur_msg->spi->chip_select << 2;
1135 if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
1136 spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
1137 if (!dd->cur_transfer->cs_change)
1138 spi_ioc |= SPI_IO_C_MX_CS_MODE;
1139 if (spi_ioc != spi_ioc_orig)
1140 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1141
1142 if (dd->mode == SPI_DMOV_MODE) {
1143 msm_spi_setup_dm_transfer(dd);
1144 msm_spi_enqueue_dm_commands(dd);
1145 }
1146 /* The output fifo interrupt handler will handle all writes after
1147 the first. Restricting this to one write avoids contention
1148 issues and race conditions between this thread and the int handler
1149 */
1150 else if (dd->mode == SPI_FIFO_MODE) {
1151 if (msm_spi_prepare_for_write(dd))
1152 goto transfer_end;
1153 msm_spi_start_write(dd, read_count);
1154 }
1155
1156 /* Only enter the RUN state after the first word is written into
1157 the output FIFO. Otherwise, the output FIFO EMPTY interrupt
1158 might fire before the first word is written resulting in a
1159 possible race condition.
1160 */
1161 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
1162 goto transfer_end;
1163
1164 timeout = 100 * msecs_to_jiffies(
1165 DIV_ROUND_UP(dd->cur_msg_len * 8,
1166 DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
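	/*
	 * Illustration of the timeout above (assumed numbers): a 1000-byte
	 * message at 1 MHz gives DIV_ROUND_UP(8000 bits, 1000 bits-per-ms)
	 * = 8 ms of ideal wire time, so we wait up to
	 * 100 * msecs_to_jiffies(8), i.e. a 100x safety margin.
	 */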
1167
1168 /* Assume success, this might change later upon transaction result */
1169 dd->cur_msg->status = 0;
1170 do {
1171 if (!wait_for_completion_timeout(&dd->transfer_complete,
1172 timeout)) {
1173 dev_err(dd->dev, "%s: SPI transaction "
1174 "timeout\n", __func__);
1175 dd->cur_msg->status = -EIO;
1176 if (dd->mode == SPI_DMOV_MODE) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001177 msm_dmov_flush(dd->tx_dma_chan, 1);
1178 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001179 }
1180 break;
1181 }
1182 } while (msm_spi_dm_send_next(dd));
1183
Sagar Dharia2840b0a2012-11-02 18:26:01 -06001184 msm_spi_udelay(dd->cur_transfer->delay_usecs);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001185transfer_end:
1186 if (dd->mode == SPI_DMOV_MODE)
1187 msm_spi_unmap_dma_buffers(dd);
1188 dd->mode = SPI_MODE_NONE;
1189
1190 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
1191 writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
1192 dd->base + SPI_IO_CONTROL);
1193}
1194
1195static void get_transfer_length(struct msm_spi *dd)
1196{
1197 struct spi_transfer *tr;
1198 int num_xfrs = 0;
1199 int readlen = 0;
1200 int writelen = 0;
1201
1202 dd->cur_msg_len = 0;
1203 dd->multi_xfr = 0;
1204 dd->read_len = dd->write_len = 0;
1205
1206 list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
1207 if (tr->tx_buf)
1208 writelen += tr->len;
1209 if (tr->rx_buf)
1210 readlen += tr->len;
1211 dd->cur_msg_len += tr->len;
1212 num_xfrs++;
1213 }
1214
1215 if (num_xfrs == 2) {
1216 struct spi_transfer *first_xfr = dd->cur_transfer;
1217
1218 dd->multi_xfr = 1;
1219 tr = list_entry(first_xfr->transfer_list.next,
1220 struct spi_transfer,
1221 transfer_list);
1222 /*
1223 * We update dd->read_len and dd->write_len only
1224 * for WR-WR and WR-RD transfers.
1225 */
1226 if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
1227 if (((tr->tx_buf) && (!tr->rx_buf)) ||
1228 ((!tr->tx_buf) && (tr->rx_buf))) {
1229 dd->read_len = readlen;
1230 dd->write_len = writelen;
1231 }
1232 }
1233 } else if (num_xfrs > 1)
1234 dd->multi_xfr = 1;
1235}
1236
1237static inline int combine_transfers(struct msm_spi *dd)
1238{
1239 struct spi_transfer *t = dd->cur_transfer;
1240 struct spi_transfer *nxt;
1241 int xfrs_grped = 1;
1242
1243 dd->cur_msg_len = dd->cur_transfer->len;
1244 while (t->transfer_list.next != &dd->cur_msg->transfers) {
1245 nxt = list_entry(t->transfer_list.next,
1246 struct spi_transfer,
1247 transfer_list);
1248 if (t->cs_change != nxt->cs_change)
1249 return xfrs_grped;
1250 dd->cur_msg_len += nxt->len;
1251 xfrs_grped++;
1252 t = nxt;
1253 }
1254 return xfrs_grped;
1255}
1256
Harini Jayaraman093938a2012-04-20 15:33:23 -06001257static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
1258{
1259 u32 spi_ioc;
1260 u32 spi_ioc_orig;
1261
1262 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1263 spi_ioc_orig = spi_ioc;
1264 if (set_flag)
1265 spi_ioc |= SPI_IO_C_FORCE_CS;
1266 else
1267 spi_ioc &= ~SPI_IO_C_FORCE_CS;
1268
1269 if (spi_ioc != spi_ioc_orig)
1270 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1271}
1272
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001273static void msm_spi_process_message(struct msm_spi *dd)
1274{
1275 int xfrs_grped = 0;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001276 int cs_num;
1277 int rc;
Sagar Dharia2840b0a2012-11-02 18:26:01 -06001278 bool xfer_delay = false;
1279 struct spi_transfer *tr;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001280
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001281 dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001282 cs_num = dd->cur_msg->spi->chip_select;
1283 if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
1284 (!(dd->cs_gpios[cs_num].valid)) &&
1285 (dd->cs_gpios[cs_num].gpio_num >= 0)) {
1286 rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
1287 spi_cs_rsrcs[cs_num]);
1288 if (rc) {
1289 dev_err(dd->dev, "gpio_request for pin %d failed with "
1290 "error %d\n", dd->cs_gpios[cs_num].gpio_num,
1291 rc);
1292 return;
1293 }
1294 dd->cs_gpios[cs_num].valid = 1;
1295 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001296
Sagar Dharia2840b0a2012-11-02 18:26:01 -06001297 list_for_each_entry(tr,
1298 &dd->cur_msg->transfers,
1299 transfer_list) {
1300 if (tr->delay_usecs) {
1301 dev_info(dd->dev, "SPI slave requests delay per txn: %d\n",
1302 tr->delay_usecs);
1303 xfer_delay = true;
1304 break;
1305 }
1306 }
1307
1308 /* Don't combine xfers if delay is needed after every xfer */
1309 if (dd->qup_ver || xfer_delay) {
1310 if (dd->qup_ver)
1311 write_force_cs(dd, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001312 list_for_each_entry(dd->cur_transfer,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001313 &dd->cur_msg->transfers,
1314 transfer_list) {
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001315 struct spi_transfer *t = dd->cur_transfer;
1316 struct spi_transfer *nxt;
1317
1318 if (t->transfer_list.next != &dd->cur_msg->transfers) {
1319 nxt = list_entry(t->transfer_list.next,
1320 struct spi_transfer,
1321 transfer_list);
1322
Sagar Dharia2840b0a2012-11-02 18:26:01 -06001323 if (dd->qup_ver &&
1324 t->cs_change == nxt->cs_change)
Harini Jayaraman093938a2012-04-20 15:33:23 -06001325 write_force_cs(dd, 1);
Sagar Dharia2840b0a2012-11-02 18:26:01 -06001326 else if (dd->qup_ver)
Harini Jayaraman093938a2012-04-20 15:33:23 -06001327 write_force_cs(dd, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001328 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001329
1330 dd->cur_msg_len = dd->cur_transfer->len;
1331 msm_spi_process_transfer(dd);
1332 }
1333 } else {
1334 dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
1335 struct spi_transfer,
1336 transfer_list);
1337 get_transfer_length(dd);
1338 if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
1339 /*
1340 * Handling of multi-transfers.
1341 * FIFO mode is used by default
1342 */
1343 list_for_each_entry(dd->cur_transfer,
1344 &dd->cur_msg->transfers,
1345 transfer_list) {
1346 if (!dd->cur_transfer->len)
1347 goto error;
1348 if (xfrs_grped) {
1349 xfrs_grped--;
1350 continue;
1351 } else {
1352 dd->read_len = dd->write_len = 0;
1353 xfrs_grped = combine_transfers(dd);
1354 }
1355
1356 dd->cur_tx_transfer = dd->cur_transfer;
1357 dd->cur_rx_transfer = dd->cur_transfer;
1358 msm_spi_process_transfer(dd);
1359 xfrs_grped--;
1360 }
1361 } else {
1362 /* Handling of a single transfer or
1363 * WR-WR or WR-RD transfers
1364 */
1365 if ((!dd->cur_msg->is_dma_mapped) &&
1366 (msm_use_dm(dd, dd->cur_transfer,
1367 dd->cur_transfer->bits_per_word))) {
1368 /* Mapping of DMA buffers */
1369 int ret = msm_spi_map_dma_buffers(dd);
1370 if (ret < 0) {
1371 dd->cur_msg->status = ret;
1372 goto error;
1373 }
1374 }
1375
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001376 dd->cur_tx_transfer = dd->cur_transfer;
1377 dd->cur_rx_transfer = dd->cur_transfer;
1378 msm_spi_process_transfer(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001379 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001380 }
Harini Jayaramane4c06192011-09-28 16:26:39 -06001381
1382 return;
1383
1384error:
1385 if (dd->cs_gpios[cs_num].valid) {
1386 gpio_free(dd->cs_gpios[cs_num].gpio_num);
1387 dd->cs_gpios[cs_num].valid = 0;
1388 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001389}
1390
1391/* workqueue - pull messages from queue & process */
1392static void msm_spi_workq(struct work_struct *work)
1393{
1394 struct msm_spi *dd =
1395 container_of(work, struct msm_spi, work_data);
1396 unsigned long flags;
1397 u32 status_error = 0;
Alok Chauhanb5f53792012-08-22 19:54:45 +05301398 int rc = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001399
1400 mutex_lock(&dd->core_lock);
1401
1402 /* Don't allow power collapse until we release mutex */
1403 if (pm_qos_request_active(&qos_req_list))
1404 pm_qos_update_request(&qos_req_list,
1405 dd->pm_lat);
1406 if (dd->use_rlock)
1407 remote_mutex_lock(&dd->r_lock);
1408
Alok Chauhanb5f53792012-08-22 19:54:45 +05301409 /* Configure the spi clk, miso, mosi and cs gpio */
1410 if (dd->pdata->gpio_config) {
1411 rc = dd->pdata->gpio_config();
1412 if (rc) {
1413 dev_err(dd->dev,
1414 "%s: error configuring GPIOs\n",
1415 __func__);
1416 status_error = 1;
1417 }
1418 }
1419
1420 rc = msm_spi_request_gpios(dd);
1421 if (rc)
1422 status_error = 1;
1423
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001424 clk_prepare_enable(dd->clk);
1425 clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001426 msm_spi_enable_irqs(dd);
1427
1428 if (!msm_spi_is_valid_state(dd)) {
1429 dev_err(dd->dev, "%s: SPI operational state not valid\n",
1430 __func__);
1431 status_error = 1;
1432 }
1433
1434 spin_lock_irqsave(&dd->queue_lock, flags);
1435 while (!list_empty(&dd->queue)) {
1436 dd->cur_msg = list_entry(dd->queue.next,
1437 struct spi_message, queue);
1438 list_del_init(&dd->cur_msg->queue);
1439 spin_unlock_irqrestore(&dd->queue_lock, flags);
1440 if (status_error)
1441 dd->cur_msg->status = -EIO;
1442 else
1443 msm_spi_process_message(dd);
1444 if (dd->cur_msg->complete)
1445 dd->cur_msg->complete(dd->cur_msg->context);
1446 spin_lock_irqsave(&dd->queue_lock, flags);
1447 }
1448 dd->transfer_pending = 0;
1449 spin_unlock_irqrestore(&dd->queue_lock, flags);
1450
1451 msm_spi_disable_irqs(dd);
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001452 clk_disable_unprepare(dd->clk);
1453 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001454
Alok Chauhanb5f53792012-08-22 19:54:45 +05301455 /* Free the spi clk, miso, mosi, cs gpio */
1456 if (!rc && dd->pdata && dd->pdata->gpio_release)
1457 dd->pdata->gpio_release();
1458 if (!rc)
1459 msm_spi_free_gpios(dd);
1460
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001461 if (dd->use_rlock)
1462 remote_mutex_unlock(&dd->r_lock);
1463
1464 if (pm_qos_request_active(&qos_req_list))
1465 pm_qos_update_request(&qos_req_list,
1466 PM_QOS_DEFAULT_VALUE);
1467
1468 mutex_unlock(&dd->core_lock);
1469 /* If needed, this can be done after the current message is complete,
1470 and work can be continued upon resume. No motivation for now. */
1471 if (dd->suspended)
1472 wake_up_interruptible(&dd->continue_suspend);
1473}
1474
1475static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
1476{
1477 struct msm_spi *dd;
1478 unsigned long flags;
1479 struct spi_transfer *tr;
1480
1481 dd = spi_master_get_devdata(spi->master);
1482 if (dd->suspended)
1483 return -EBUSY;
1484
1485 if (list_empty(&msg->transfers) || !msg->complete)
1486 return -EINVAL;
1487
1488 list_for_each_entry(tr, &msg->transfers, transfer_list) {
1489 /* Check message parameters */
1490 if (tr->speed_hz > dd->pdata->max_clock_speed ||
1491 (tr->bits_per_word &&
1492 (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
1493 (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
1494 dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw, "
1495 "tx=%p, rx=%p\n",
1496 tr->speed_hz, tr->bits_per_word,
1497 tr->tx_buf, tr->rx_buf);
1498 return -EINVAL;
1499 }
1500 }
1501
1502 spin_lock_irqsave(&dd->queue_lock, flags);
1503 if (dd->suspended) {
1504 spin_unlock_irqrestore(&dd->queue_lock, flags);
1505 return -EBUSY;
1506 }
1507 dd->transfer_pending = 1;
1508 list_add_tail(&msg->queue, &dd->queue);
1509 spin_unlock_irqrestore(&dd->queue_lock, flags);
1510 queue_work(dd->workqueue, &dd->work_data);
1511 return 0;
1512}
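/*
 * Client-side usage sketch (illustrative only; the spi, buf and len
 * names below are hypothetical): a protocol driver bound to a slave on
 * this controller would typically submit work as
 *
 *	struct spi_transfer t = { .tx_buf = buf, .len = len };
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	spi_sync(spi, &m);
 *
 * The SPI core then funnels the message into msm_spi_transfer() above,
 * which validates it, queues it, and kicks the workqueue.
 */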
1513
1514static int msm_spi_setup(struct spi_device *spi)
1515{
1516 struct msm_spi *dd;
1517 int rc = 0;
1518 u32 spi_ioc;
1519 u32 spi_config;
1520 u32 mask;
1521
1522 if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
1523 dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
1524 __func__, spi->bits_per_word);
1525 rc = -EINVAL;
1526 }
1527 if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
1528 dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
1529 __func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
1530 rc = -EINVAL;
1531 }
1532
1533 if (rc)
1534 goto err_setup_exit;
1535
1536 dd = spi_master_get_devdata(spi->master);
1537
1538 mutex_lock(&dd->core_lock);
1539 if (dd->suspended) {
1540 mutex_unlock(&dd->core_lock);
1541 return -EBUSY;
1542 }
1543
1544 if (dd->use_rlock)
1545 remote_mutex_lock(&dd->r_lock);
1546
Alok Chauhanb5f53792012-08-22 19:54:45 +05301547 /* Configure the spi clk, miso, mosi, cs gpio */
1548 if (dd->pdata->gpio_config) {
1549 rc = dd->pdata->gpio_config();
1550 if (rc) {
1551 dev_err(&spi->dev,
1552 "%s: error configuring GPIOs\n",
1553 __func__);
1554 rc = -ENXIO;
1555 goto err_setup_gpio;
1556 }
1557 }
1558
1559 rc = msm_spi_request_gpios(dd);
1560 if (rc) {
1561 rc = -ENXIO;
1562 goto err_setup_gpio;
1563 }
1564
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001565 clk_prepare_enable(dd->clk);
1566 clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001567
1568 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1569 mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
1570 if (spi->mode & SPI_CS_HIGH)
1571 spi_ioc |= mask;
1572 else
1573 spi_ioc &= ~mask;
1574 if (spi->mode & SPI_CPOL)
1575 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1576 else
1577 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1578
1579 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1580
1581 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
1582 if (spi->mode & SPI_LOOP)
1583 spi_config |= SPI_CFG_LOOPBACK;
1584 else
1585 spi_config &= ~SPI_CFG_LOOPBACK;
1586 if (spi->mode & SPI_CPHA)
1587 spi_config &= ~SPI_CFG_INPUT_FIRST;
1588 else
1589 spi_config |= SPI_CFG_INPUT_FIRST;
1590 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
1591
1592 /* Ensure previous write completed before disabling the clocks */
1593 mb();
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001594 clk_disable_unprepare(dd->clk);
1595 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001596
Alok Chauhanb5f53792012-08-22 19:54:45 +05301597 /* Free the spi clk, miso, mosi, cs gpio */
1598 if (dd->pdata && dd->pdata->gpio_release)
1599 dd->pdata->gpio_release();
1600 msm_spi_free_gpios(dd);
1601
1602err_setup_gpio:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001603 if (dd->use_rlock)
1604 remote_mutex_unlock(&dd->r_lock);
1605 mutex_unlock(&dd->core_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001606err_setup_exit:
1607 return rc;
1608}
1609
1610#ifdef CONFIG_DEBUG_FS
1611static int debugfs_iomem_x32_set(void *data, u64 val)
1612{
1613 writel_relaxed(val, data);
1614 /* Ensure the previous write completed. */
1615 mb();
1616 return 0;
1617}
1618
1619static int debugfs_iomem_x32_get(void *data, u64 *val)
1620{
1621 *val = readl_relaxed(data);
1622 /* Ensure the previous read completed. */
1623 mb();
1624 return 0;
1625}
1626
1627DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
1628 debugfs_iomem_x32_set, "0x%08llx\n");
1629
1630static void spi_debugfs_init(struct msm_spi *dd)
1631{
1632 dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
1633 if (dd->dent_spi) {
1634 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001635
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001636 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
1637 dd->debugfs_spi_regs[i] =
1638 debugfs_create_file(
1639 debugfs_spi_regs[i].name,
1640 debugfs_spi_regs[i].mode,
1641 dd->dent_spi,
1642 dd->base + debugfs_spi_regs[i].offset,
1643 &fops_iomem_x32);
1644 }
1645 }
1646}
1647
1648static void spi_debugfs_exit(struct msm_spi *dd)
1649{
1650 if (dd->dent_spi) {
1651 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001652
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001653 debugfs_remove_recursive(dd->dent_spi);
1654 dd->dent_spi = NULL;
1655 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
1656 dd->debugfs_spi_regs[i] = NULL;
1657 }
1658}
1659#else
1660static void spi_debugfs_init(struct msm_spi *dd) {}
1661static void spi_debugfs_exit(struct msm_spi *dd) {}
1662#endif
1663
1664/* ===Device attributes begin=== */
1665static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
1666 char *buf)
1667{
1668 struct spi_master *master = dev_get_drvdata(dev);
1669 struct msm_spi *dd = spi_master_get_devdata(master);
1670
1671 return snprintf(buf, PAGE_SIZE,
1672 "Device %s\n"
1673 "rx fifo_size = %d spi words\n"
1674 "tx fifo_size = %d spi words\n"
1675 "use_dma ? %s\n"
1676 "rx block size = %d bytes\n"
1677 "tx block size = %d bytes\n"
Alok Chauhanc27843e2013-02-15 16:04:20 +05301678 "input burst size = %d bytes\n"
1679 "output burst size = %d bytes\n"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001680 "DMA configuration:\n"
1681 "tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
1682 "--statistics--\n"
1683 "Rx isrs = %d\n"
1684 "Tx isrs = %d\n"
1685 "DMA error = %d\n"
1686 "--debug--\n"
1687 "NA yet\n",
1688 dev_name(dev),
1689 dd->input_fifo_size,
1690 dd->output_fifo_size,
1691 dd->use_dma ? "yes" : "no",
1692 dd->input_block_size,
1693 dd->output_block_size,
Alok Chauhanc27843e2013-02-15 16:04:20 +05301694 dd->input_burst_size,
1695 dd->output_burst_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001696 dd->tx_dma_chan,
1697 dd->rx_dma_chan,
1698 dd->tx_dma_crci,
1699 dd->rx_dma_crci,
1700 dd->stat_rx + dd->stat_dmov_rx,
1701 dd->stat_tx + dd->stat_dmov_tx,
1702 dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
1703 );
1704}
1705
1706/* Reset statistics on write */
1707static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
1708 const char *buf, size_t count)
1709{
1710	struct spi_master *master = dev_get_drvdata(dev);
	struct msm_spi *dd = spi_master_get_devdata(master);
1711 dd->stat_rx = 0;
1712 dd->stat_tx = 0;
1713 dd->stat_dmov_rx = 0;
1714 dd->stat_dmov_tx = 0;
1715 dd->stat_dmov_rx_err = 0;
1716 dd->stat_dmov_tx_err = 0;
1717 return count;
1718}
1719
1720static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
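/*
 * The "stats" attribute is added to the controller's platform device in
 * probe() via sysfs_create_group().  Reading it prints the FIFO/DMA
 * configuration and the interrupt and error counters above; writing any
 * string clears the counters, e.g. (path shown only as an illustration):
 *
 *   echo 1 > /sys/devices/.../<spi-platform-device>/stats
 */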
1721
1722static struct attribute *dev_attrs[] = {
1723 &dev_attr_stats.attr,
1724 NULL,
1725};
1726
1727static struct attribute_group dev_attr_grp = {
1728 .attrs = dev_attrs,
1729};
1730/* ===Device attributes end=== */
1731
1732/**
1733 * spi_dmov_tx_complete_func - DataMover tx completion callback
1734 *
1735 * Executed in IRQ context (Data Mover's IRQ) with DataMover's
1736 * spinlock @msm_dmov_lock held.
1737 */
1738static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
1739 unsigned int result,
1740 struct msm_dmov_errdata *err)
1741{
1742 struct msm_spi *dd;
1743
1744 if (!(result & DMOV_RSLT_VALID)) {
1745		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p\n", result, cmd);
1746 return;
1747 }
1748 /* restore original context */
1749 dd = container_of(cmd, struct msm_spi, tx_hdr);
Kiran Gunda54eb06e2012-05-18 15:17:06 +05301750 if (result & DMOV_RSLT_DONE) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001751 dd->stat_dmov_tx++;
Kiran Gunda54eb06e2012-05-18 15:17:06 +05301752		if (atomic_inc_return(&dd->tx_irq_called) == 1)
1753 return;
1754 complete(&dd->transfer_complete);
1755 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001756 /* Error or flush */
1757 if (result & DMOV_RSLT_ERROR) {
1758 dev_err(dd->dev, "DMA error (0x%08x)\n", result);
1759 dd->stat_dmov_tx_err++;
1760 }
1761 if (result & DMOV_RSLT_FLUSH) {
1762 /*
1763 * Flushing normally happens in process of
1764 * removing, when we are waiting for outstanding
1765 * DMA commands to be flushed.
1766 */
1767 dev_info(dd->dev,
1768 "DMA channel flushed (0x%08x)\n", result);
1769 }
1770 if (err)
1771 dev_err(dd->dev,
1772 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
1773 err->flush[0], err->flush[1], err->flush[2],
1774 err->flush[3], err->flush[4], err->flush[5]);
1775 dd->cur_msg->status = -EIO;
1776 complete(&dd->transfer_complete);
1777 }
1778}
1779
1780/**
1781 * spi_dmov_rx_complete_func - DataMover rx completion callback
1782 *
1783 * Executed in IRQ context (Data Mover's IRQ)
1784 * with DataMover's spinlock @msm_dmov_lock held.
1785 */
1786static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
1787 unsigned int result,
1788 struct msm_dmov_errdata *err)
1789{
1790 struct msm_spi *dd;
1791
1792 if (!(result & DMOV_RSLT_VALID)) {
1793		pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)\n",
1794 result, cmd);
1795 return;
1796 }
1797 /* restore original context */
1798 dd = container_of(cmd, struct msm_spi, rx_hdr);
1799 if (result & DMOV_RSLT_DONE) {
1800 dd->stat_dmov_rx++;
1801 if (atomic_inc_return(&dd->rx_irq_called) == 1)
1802 return;
1803 complete(&dd->transfer_complete);
1804 } else {
1805		/* Error or flush */
1806 if (result & DMOV_RSLT_ERROR) {
1807 dev_err(dd->dev, "DMA error(0x%08x)\n", result);
1808 dd->stat_dmov_rx_err++;
1809 }
1810 if (result & DMOV_RSLT_FLUSH) {
1811 dev_info(dd->dev,
1812 "DMA channel flushed(0x%08x)\n", result);
1813 }
1814 if (err)
1815 dev_err(dd->dev,
1816 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
1817 err->flush[0], err->flush[1], err->flush[2],
1818 err->flush[3], err->flush[4], err->flush[5]);
1819 dd->cur_msg->status = -EIO;
1820 complete(&dd->transfer_complete);
1821 }
1822}
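/*
 * Note on the two DataMover callbacks above: on a successful completion
 * each callback bumps its per-direction counter (tx_irq_called or
 * rx_irq_called) and returns quietly the first time it is called for the
 * current transfer; only a later call (or an error/flush result, which
 * sets cur_msg->status to -EIO) signals dd->transfer_complete and wakes
 * the thread waiting on the transfer.
 */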
1823
Alok Chauhanc27843e2013-02-15 16:04:20 +05301824static inline u32 get_chunk_size(struct msm_spi *dd, int input_burst_size,
1825 int output_burst_size)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001826{
1827 u32 cache_line = dma_get_cache_alignment();
Alok Chauhanc27843e2013-02-15 16:04:20 +05301828 int burst_size = (input_burst_size > output_burst_size) ?
1829 input_burst_size : output_burst_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001830
1831 return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
Alok Chauhanc27843e2013-02-15 16:04:20 +05301832 roundup(burst_size, cache_line))*2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001833}
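/*
 * get_chunk_size() sizes the single coherent allocation from which
 * msm_spi_init_dma() below carves four regions: the TX and RX DataMover
 * command structures (each rounded up to DM_BYTE_ALIGN) and the TX and
 * RX padding buffers (each rounded up to a cache line).  As a purely
 * illustrative example, with a 32-byte cache line and 64-byte bursts the
 * result is (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) + 64) * 2
 * bytes.
 */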
1834
1835static void msm_spi_teardown_dma(struct msm_spi *dd)
1836{
1837 int limit = 0;
1838
1839 if (!dd->use_dma)
1840 return;
1841
1842 while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001843 msm_dmov_flush(dd->tx_dma_chan, 1);
1844 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001845 msleep(10);
1846 }
1847
Alok Chauhanc27843e2013-02-15 16:04:20 +05301848 dma_free_coherent(NULL,
1849 get_chunk_size(dd, dd->input_burst_size, dd->output_burst_size),
1850 dd->tx_dmov_cmd,
1851 dd->tx_dmov_cmd_dma);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001852 dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
1853 dd->tx_padding = dd->rx_padding = NULL;
1854}
1855
1856static __init int msm_spi_init_dma(struct msm_spi *dd)
1857{
1858 dmov_box *box;
1859 u32 cache_line = dma_get_cache_alignment();
1860
1861 /* Allocate all as one chunk, since all is smaller than page size */
1862
1863	/* We pass a NULL device, since a real device would require
1864	   coherent_dma_mask in its definition; we are okay with using
	   the system pool */
Alok Chauhanc27843e2013-02-15 16:04:20 +05301865 dd->tx_dmov_cmd
1866 = dma_alloc_coherent(NULL,
1867 get_chunk_size(dd, dd->input_burst_size,
1868 dd->output_burst_size),
1869 &dd->tx_dmov_cmd_dma, GFP_KERNEL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001870 if (dd->tx_dmov_cmd == NULL)
1871 return -ENOMEM;
1872
1873	/* DMA addresses should be 64-bit aligned */
1874 dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
1875 ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
1876 dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
1877 sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);
1878
1879 /* Buffers should be aligned to cache line */
1880 dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
1881 dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
1882 sizeof(struct spi_dmov_cmd), cache_line);
Alok Chauhanc27843e2013-02-15 16:04:20 +05301883 dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding +
1884 dd->output_burst_size), cache_line);
1885 dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->output_burst_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001886 cache_line);
1887
1888 /* Setup DM commands */
1889 box = &(dd->rx_dmov_cmd->box);
1890 box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
1891 box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
1892 dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
1893 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
1894 offsetof(struct spi_dmov_cmd, cmd_ptr));
1895 dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001896
1897 box = &(dd->tx_dmov_cmd->box);
1898 box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
1899 box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
1900 dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
1901 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
1902 offsetof(struct spi_dmov_cmd, cmd_ptr));
1903 dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001904
1905 dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
1906 CMD_DST_CRCI(dd->tx_dma_crci);
1907 dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
1908 SPI_OUTPUT_FIFO;
1909 dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
1910 CMD_SRC_CRCI(dd->rx_dma_crci);
1911 dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
1912 SPI_INPUT_FIFO;
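	/*
	 * Rough sketch of how these commands are meant to be used (the
	 * actual transfer setup lives elsewhere in the driver): the box
	 * commands stream whole bursts between memory and the SPI FIFOs
	 * under CRCI flow control, while the single_pad commands are
	 * intended to push a final burst through tx_padding/rx_padding
	 * when a transfer does not end on a burst boundary.
	 */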
1913
1914 /* Clear remaining activities on channel */
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001915 msm_dmov_flush(dd->tx_dma_chan, 1);
1916 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001917
1918 return 0;
1919}
1920
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001921struct msm_spi_platform_data *msm_spi_dt_to_pdata(struct platform_device *pdev)
1922{
1923 struct device_node *node = pdev->dev.of_node;
1924 struct msm_spi_platform_data *pdata;
1925
1926 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1927 if (!pdata) {
1928 pr_err("Unable to allocate platform data\n");
1929 return NULL;
1930 }
1931
1932 of_property_read_u32(node, "spi-max-frequency",
1933 &pdata->max_clock_speed);
Kiran Gundae8f16742012-06-27 10:06:32 +05301934 of_property_read_u32(node, "infinite_mode",
1935 &pdata->infinite_mode);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001936
1937 return pdata;
1938}
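/*
 * Illustrative (not authoritative) device tree node for this controller,
 * using only the properties referenced in this file; addresses and values
 * are placeholders:
 *
 *	spi@<addr> {
 *		compatible = "qcom,spi-qup-v2";
 *		reg = <...>;
 *		cell-index = <0>;
 *		spi-max-frequency = <26000000>;
 *		infinite_mode = <0>;
 *		cs-gpios = <...>;
 *	};
 */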
1939
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001940static int __init msm_spi_probe(struct platform_device *pdev)
1941{
1942 struct spi_master *master;
1943 struct msm_spi *dd;
1944 struct resource *resource;
1945 int rc = -ENXIO;
1946 int locked = 0;
1947 int i = 0;
1948 int clk_enabled = 0;
1949 int pclk_enabled = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001950 struct msm_spi_platform_data *pdata;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001951 enum of_gpio_flags flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001952
1953 master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
1954 if (!master) {
1955 rc = -ENOMEM;
1956 dev_err(&pdev->dev, "master allocation failed\n");
1957 goto err_probe_exit;
1958 }
1959
1960 master->bus_num = pdev->id;
1961 master->mode_bits = SPI_SUPPORTED_MODES;
1962 master->num_chipselect = SPI_NUM_CHIPSELECTS;
1963 master->setup = msm_spi_setup;
1964 master->transfer = msm_spi_transfer;
1965 platform_set_drvdata(pdev, master);
1966 dd = spi_master_get_devdata(master);
1967
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001968 if (pdev->dev.of_node) {
1969 dd->qup_ver = SPI_QUP_VERSION_BFAM;
1970 master->dev.of_node = pdev->dev.of_node;
1971 pdata = msm_spi_dt_to_pdata(pdev);
1972 if (!pdata) {
1973 rc = -ENOMEM;
1974 goto err_probe_exit;
1975 }
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001976
Kenneth Heitkeecc836b2012-08-11 20:53:01 -06001977 rc = of_property_read_u32(pdev->dev.of_node,
1978 "cell-index", &pdev->id);
1979 if (rc)
1980 dev_warn(&pdev->dev,
1981 "using default bus_num %d\n", pdev->id);
1982 else
1983 master->bus_num = pdev->id;
1984
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001985 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
1986 dd->spi_gpios[i] = of_get_gpio_flags(pdev->dev.of_node,
1987 i, &flags);
1988 }
1989
1990 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
1991 dd->cs_gpios[i].gpio_num = of_get_named_gpio_flags(
1992 pdev->dev.of_node, "cs-gpios",
1993 i, &flags);
1994 dd->cs_gpios[i].valid = 0;
1995 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001996 } else {
1997 pdata = pdev->dev.platform_data;
1998 dd->qup_ver = SPI_QUP_VERSION_NONE;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001999
2000 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
2001 resource = platform_get_resource(pdev, IORESOURCE_IO,
2002 i);
2003 dd->spi_gpios[i] = resource ? resource->start : -1;
2004 }
2005
2006 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
2007 resource = platform_get_resource(pdev, IORESOURCE_IO,
2008 i + ARRAY_SIZE(spi_rsrcs));
2009 dd->cs_gpios[i].gpio_num = resource ?
2010 resource->start : -1;
2011 dd->cs_gpios[i].valid = 0;
2012 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002013 }
2014
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002015 dd->pdata = pdata;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002016 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002017 if (!resource) {
2018 rc = -ENXIO;
2019 goto err_probe_res;
2020 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002021
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002022 dd->mem_phys_addr = resource->start;
2023 dd->mem_size = resource_size(resource);
2024
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002025 if (pdata) {
2026 if (pdata->dma_config) {
2027 rc = pdata->dma_config();
2028 if (rc) {
2029 dev_warn(&pdev->dev,
2030 "%s: DM mode not supported\n",
2031 __func__);
2032 dd->use_dma = 0;
2033 goto skip_dma_resources;
2034 }
2035 }
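		/*
		 * Legacy (non-DT) DMA wiring: the first IORESOURCE_DMA entry
		 * carries the RX/TX DataMover channel numbers in its
		 * start/end fields, and the second entry carries the
		 * matching RX/TX CRCI numbers.
		 */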
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002036 resource = platform_get_resource(pdev, IORESOURCE_DMA, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002037 if (resource) {
2038 dd->rx_dma_chan = resource->start;
2039 dd->tx_dma_chan = resource->end;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002040 resource = platform_get_resource(pdev, IORESOURCE_DMA,
2041 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002042 if (!resource) {
2043 rc = -ENXIO;
2044 goto err_probe_res;
2045 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002046
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002047 dd->rx_dma_crci = resource->start;
2048 dd->tx_dma_crci = resource->end;
2049 dd->use_dma = 1;
2050 master->dma_alignment = dma_get_cache_alignment();
2051 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002052 }
2053
Alok Chauhanb5f53792012-08-22 19:54:45 +05302054skip_dma_resources:
Harini Jayaramane4c06192011-09-28 16:26:39 -06002055
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002056 spin_lock_init(&dd->queue_lock);
2057 mutex_init(&dd->core_lock);
2058 INIT_LIST_HEAD(&dd->queue);
2059 INIT_WORK(&dd->work_data, msm_spi_workq);
2060 init_waitqueue_head(&dd->continue_suspend);
2061 dd->workqueue = create_singlethread_workqueue(
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002062 dev_name(master->dev.parent));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002063 if (!dd->workqueue)
2064 goto err_probe_workq;
2065
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002066 if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
2067 dd->mem_size, SPI_DRV_NAME)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002068 rc = -ENXIO;
2069 goto err_probe_reqmem;
2070 }
2071
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002072 dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
2073 if (!dd->base) {
2074 rc = -ENOMEM;
2075 goto err_probe_reqmem;
2076 }
2077
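	/*
	 * When the platform supplies a remote spinlock id (rsl_id), the QUP
	 * is arbitrated with another bus master: register access is then
	 * bracketed by remote_mutex_lock()/unlock() in addition to the local
	 * core_lock, and a pm_qos request is set up on this path to bound
	 * CPU/DMA latency.
	 */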
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002078 if (pdata && pdata->rsl_id) {
2079 struct remote_mutex_id rmid;
2080 rmid.r_spinlock_id = pdata->rsl_id;
2081 rmid.delay_us = SPI_TRYLOCK_DELAY;
2082
2083 rc = remote_mutex_init(&dd->r_lock, &rmid);
2084 if (rc) {
2085			dev_err(&pdev->dev, "%s: unable to init remote_mutex "
2086				"(%s), (rc=%d)\n", __func__,
2087				rmid.r_spinlock_id, rc);
2088 goto err_probe_rlock_init;
2089 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002090
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002091 dd->use_rlock = 1;
2092 dd->pm_lat = pdata->pm_lat;
Alok Chauhanb5f53792012-08-22 19:54:45 +05302093 pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
2094 PM_QOS_DEFAULT_VALUE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002095 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002096
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002097 mutex_lock(&dd->core_lock);
2098 if (dd->use_rlock)
2099 remote_mutex_lock(&dd->r_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002100
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002101 locked = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002102 dd->dev = &pdev->dev;
Matt Wagantallac294852011-08-17 15:44:58 -07002103 dd->clk = clk_get(&pdev->dev, "core_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002104 if (IS_ERR(dd->clk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002105 dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002106 rc = PTR_ERR(dd->clk);
2107 goto err_probe_clk_get;
2108 }
2109
Matt Wagantallac294852011-08-17 15:44:58 -07002110 dd->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002111 if (IS_ERR(dd->pclk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002112 dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002113 rc = PTR_ERR(dd->pclk);
2114 goto err_probe_pclk_get;
2115 }
2116
2117 if (pdata && pdata->max_clock_speed)
2118 msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
2119
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002120 rc = clk_prepare_enable(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002121 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002122 dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002123 __func__);
2124 goto err_probe_clk_enable;
2125 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002126
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002127 clk_enabled = 1;
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002128 rc = clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002129 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002130 dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002131 __func__);
2132 goto err_probe_pclk_enable;
2133 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002134
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002135 pclk_enabled = 1;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002136 rc = msm_spi_configure_gsbi(dd, pdev);
2137 if (rc)
2138 goto err_probe_gsbi;
2139
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002140 msm_spi_calculate_fifo_size(dd);
2141 if (dd->use_dma) {
2142 rc = msm_spi_init_dma(dd);
2143 if (rc)
2144 goto err_probe_dma;
2145 }
2146
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002147 msm_spi_register_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002148 /*
2149 * The SPI core generates a bogus input overrun error on some targets,
2150 * when a transition from run to reset state occurs and if the FIFO has
2151 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
2152 * bit.
2153 */
2154 msm_spi_enable_error_flags(dd);
2155
2156 writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
2157 rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
2158 if (rc)
2159 goto err_probe_state;
2160
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002161 clk_disable_unprepare(dd->clk);
2162 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002163 clk_enabled = 0;
2164 pclk_enabled = 0;
2165
2166 dd->suspended = 0;
2167 dd->transfer_pending = 0;
2168 dd->multi_xfr = 0;
2169 dd->mode = SPI_MODE_NONE;
2170
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002171 rc = msm_spi_request_irq(dd, pdev, master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002172 if (rc)
2173 goto err_probe_irq;
2174
2175 msm_spi_disable_irqs(dd);
2176 if (dd->use_rlock)
2177 remote_mutex_unlock(&dd->r_lock);
2178
2179 mutex_unlock(&dd->core_lock);
2180 locked = 0;
2181
2182 rc = spi_register_master(master);
2183 if (rc)
2184 goto err_probe_reg_master;
2185
2186 rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
2187 if (rc) {
2188 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
2189 goto err_attrs;
2190 }
2191
2192 spi_debugfs_init(dd);
Kiran Gunda2b285652012-07-30 13:22:39 +05302193
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002194 return 0;
2195
2196err_attrs:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002197 spi_unregister_master(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002198err_probe_reg_master:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002199err_probe_irq:
2200err_probe_state:
2201 msm_spi_teardown_dma(dd);
2202err_probe_dma:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002203err_probe_gsbi:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002204 if (pclk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002205 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002206err_probe_pclk_enable:
2207 if (clk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002208 clk_disable_unprepare(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002209err_probe_clk_enable:
2210 clk_put(dd->pclk);
2211err_probe_pclk_get:
2212 clk_put(dd->clk);
2213err_probe_clk_get:
2214 if (locked) {
2215 if (dd->use_rlock)
2216 remote_mutex_unlock(&dd->r_lock);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002217
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002218 mutex_unlock(&dd->core_lock);
2219 }
2220err_probe_rlock_init:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002221err_probe_reqmem:
2222 destroy_workqueue(dd->workqueue);
2223err_probe_workq:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002224err_probe_res:
2225 spi_master_put(master);
2226err_probe_exit:
2227 return rc;
2228}
2229
2230#ifdef CONFIG_PM
2231static int msm_spi_suspend(struct platform_device *pdev, pm_message_t state)
2232{
2233 struct spi_master *master = platform_get_drvdata(pdev);
2234 struct msm_spi *dd;
2235 unsigned long flags;
2236
2237 if (!master)
2238 goto suspend_exit;
2239 dd = spi_master_get_devdata(master);
2240 if (!dd)
2241 goto suspend_exit;
2242
2243 /* Make sure nothing is added to the queue while we're suspending */
2244 spin_lock_irqsave(&dd->queue_lock, flags);
2245 dd->suspended = 1;
2246 spin_unlock_irqrestore(&dd->queue_lock, flags);
2247
2248 /* Wait for transactions to end, or time out */
2249 wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002250
2251suspend_exit:
2252 return 0;
2253}
2254
2255static int msm_spi_resume(struct platform_device *pdev)
2256{
2257 struct spi_master *master = platform_get_drvdata(pdev);
2258 struct msm_spi *dd;
2259
2260 if (!master)
2261 goto resume_exit;
2262 dd = spi_master_get_devdata(master);
2263 if (!dd)
2264 goto resume_exit;
2265
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002266 dd->suspended = 0;
2267resume_exit:
2268 return 0;
2269}
2270#else
2271#define msm_spi_suspend NULL
2272#define msm_spi_resume NULL
2273#endif /* CONFIG_PM */
2274
2275static int __devexit msm_spi_remove(struct platform_device *pdev)
2276{
2277 struct spi_master *master = platform_get_drvdata(pdev);
2278 struct msm_spi *dd = spi_master_get_devdata(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002279
2280 pm_qos_remove_request(&qos_req_list);
2281 spi_debugfs_exit(dd);
2282 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
2283
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002284 msm_spi_teardown_dma(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002285
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002286 clk_put(dd->clk);
2287 clk_put(dd->pclk);
2288 destroy_workqueue(dd->workqueue);
2289	platform_set_drvdata(pdev, NULL);
2290 spi_unregister_master(master);
2291 spi_master_put(master);
2292
2293 return 0;
2294}
2295
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002296static struct of_device_id msm_spi_dt_match[] = {
2297 {
2298 .compatible = "qcom,spi-qup-v2",
2299 },
2300 {}
2301};
2302
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002303static struct platform_driver msm_spi_driver = {
2304 .driver = {
2305 .name = SPI_DRV_NAME,
2306 .owner = THIS_MODULE,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002307 .of_match_table = msm_spi_dt_match,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002308 },
2309 .suspend = msm_spi_suspend,
2310 .resume = msm_spi_resume,
2311	.remove		= __devexit_p(msm_spi_remove),
2312};
2313
2314static int __init msm_spi_init(void)
2315{
2316 return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
2317}
2318module_init(msm_spi_init);
2319
2320static void __exit msm_spi_exit(void)
2321{
2322 platform_driver_unregister(&msm_spi_driver);
2323}
2324module_exit(msm_spi_exit);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002325
2326MODULE_LICENSE("GPL v2");
2327MODULE_VERSION("0.4");
2328MODULE_ALIAS("platform:"SPI_DRV_NAME);