/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
/*
 * SPI driver for Qualcomm MSM platforms
 *
 */
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <mach/msm_spi.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <mach/dma.h>
#include <asm/atomic.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
#include <linux/remote_spinlock.h>
#include <linux/pm_qos.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include "spi_qsd.h"

static int msm_spi_pm_resume_runtime(struct device *device);
static int msm_spi_pm_suspend_runtime(struct device *device);

static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
					struct platform_device *pdev)
{
	struct resource *resource;
	unsigned long gsbi_mem_phys_addr;
	size_t gsbi_mem_size;
	void __iomem *gsbi_base;

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!resource)
		return 0;

	gsbi_mem_phys_addr = resource->start;
	gsbi_mem_size = resource_size(resource);
	if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size, SPI_DRV_NAME))
		return -ENXIO;

	gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size);
	if (!gsbi_base)
		return -ENXIO;

	/* Set GSBI to SPI mode */
	writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);

	return 0;
}

static inline void msm_spi_register_init(struct msm_spi *dd)
{
	writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
	writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
	writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
	if (dd->qup_ver)
		writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
}

static inline int msm_spi_request_gpios(struct msm_spi *dd)
{
	int i;
	int result = 0;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0) {
			result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
			if (result) {
				dev_err(dd->dev, "%s: gpio_request for pin %d "
					"failed with error %d\n", __func__,
					dd->spi_gpios[i], result);
				goto error;
			}
		}
	}
	return 0;

error:
	for (; --i >= 0;) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}
	return result;
}

static inline void msm_spi_free_gpios(struct msm_spi *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}

	for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
		if (dd->cs_gpios[i].valid) {
			gpio_free(dd->cs_gpios[i].gpio_num);
			dd->cs_gpios[i].valid = 0;
		}
	}
}

/**
 * msm_spi_clk_max_rate: finds the nearest lower rate for a clk
 * @clk the clock for which to find nearest lower rate
 * @rate clock frequency in Hz
 * @return nearest lower rate or negative error value
 *
 * Public clock API extends clk_round_rate which is a ceiling function. This
 * function is a floor function implemented as a binary search using the
 * ceiling function.
 */
static long msm_spi_clk_max_rate(struct clk *clk, unsigned long rate)
{
	long lowest_available, nearest_low, step_size, cur;
	long step_direction = -1;
	long guess = rate;
	int max_steps = 10;

	cur = clk_round_rate(clk, rate);
	if (cur == rate)
		return rate;

	/* if we got here then: cur > rate */
	lowest_available = clk_round_rate(clk, 0);
	if (lowest_available > rate)
		return -EINVAL;

	step_size = (rate - lowest_available) >> 1;
	nearest_low = lowest_available;

	while (max_steps-- && step_size) {
		guess += step_size * step_direction;

		cur = clk_round_rate(clk, guess);

		if ((cur < rate) && (cur > nearest_low))
			nearest_low = cur;

		/*
		 * if we stepped too far, then start stepping in the other
		 * direction with half the step size
		 */
		if (((cur > rate) && (step_direction > 0))
		    || ((cur < rate) && (step_direction < 0))) {
			step_direction = -step_direction;
			step_size >>= 1;
		}
	}
	return nearest_low;
}

static void msm_spi_clock_set(struct msm_spi *dd, int speed)
{
	long rate;
	int rc;

	rate = msm_spi_clk_max_rate(dd->clk, speed);
	if (rate < 0) {
		dev_err(dd->dev,
			"%s: no match found for requested clock frequency:%d",
			__func__, speed);
		return;
	}

	rc = clk_set_rate(dd->clk, rate);
	if (!rc)
		dd->clock_speed = rate;
}

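/*
 * Decode the FIFO geometry fields read from SPI_IO_MODES: 'block' selects
 * the block size in 32-bit words (1, 4 or 8) and 'mult' selects how many
 * blocks deep the FIFO is (2, 4, 8 or 16). The resulting fifo_size is
 * reported in words and block_size in bytes.
 */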
static int msm_spi_calculate_size(int *fifo_size,
				  int *block_size,
				  int block,
				  int mult)
{
	int words;

	switch (block) {
	case 0:
		words = 1; /* 4 bytes */
		break;
	case 1:
		words = 4; /* 16 bytes */
		break;
	case 2:
		words = 8; /* 32 bytes */
		break;
	default:
		return -EINVAL;
	}

	switch (mult) {
	case 0:
		*fifo_size = words * 2;
		break;
	case 1:
		*fifo_size = words * 4;
		break;
	case 2:
		*fifo_size = words * 8;
		break;
	case 3:
		*fifo_size = words * 16;
		break;
	default:
		return -EINVAL;
	}

	*block_size = words * sizeof(u32); /* in bytes */
	return 0;
}

static void get_next_transfer(struct msm_spi *dd)
{
	struct spi_transfer *t = dd->cur_transfer;

	if (t->transfer_list.next != &dd->cur_msg->transfers) {
		dd->cur_transfer = list_entry(t->transfer_list.next,
					      struct spi_transfer,
					      transfer_list);
		dd->write_buf = dd->cur_transfer->tx_buf;
		dd->read_buf = dd->cur_transfer->rx_buf;
	}
}

static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
{
	u32 spi_iom;
	int block;
	int mult;

	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);

	block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
				   block, mult)) {
		goto fifo_size_err;
	}

	block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->output_fifo_size,
				   &dd->output_block_size, block, mult)) {
		goto fifo_size_err;
	}
	/* DM mode is not available for this block size */
	if (dd->input_block_size == 4 || dd->output_block_size == 4)
		dd->use_dma = 0;

	if (dd->use_dma) {
		dd->input_burst_size = max(dd->input_block_size,
					DM_BURST_SIZE);
		dd->output_burst_size = max(dd->output_block_size,
					DM_BURST_SIZE);
	}
	return;

fifo_size_err:
	dd->use_dma = 0;
	pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
	return;
}

static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
{
	u32 data_in;
	int i;
	int shift;

	data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
	if (dd->read_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			     dd->rx_bytes_remaining; i++) {
			/* The data format depends on bytes_per_word:
			   4 bytes: 0x12345678
			   3 bytes: 0x00123456
			   2 bytes: 0x00001234
			   1 byte : 0x00000012
			*/
			shift = 8 * (dd->bytes_per_word - i - 1);
			*dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
			dd->rx_bytes_remaining--;
		}
	} else {
		if (dd->rx_bytes_remaining >= dd->bytes_per_word)
			dd->rx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->rx_bytes_remaining = 0;
	}

	dd->read_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->rx_bytes_remaining)
			dd->read_xfr_cnt = 0;
		else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
						dd->read_len) {
			struct spi_transfer *t = dd->cur_rx_transfer;
			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->read_buf = t->rx_buf;
				dd->read_len = t->len;
				dd->read_xfr_cnt = 0;
				dd->cur_rx_transfer = t;
			}
		}
	}
}

static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
{
	u32 spi_op = readl_relaxed(dd->base + SPI_STATE);

	return spi_op & SPI_OP_STATE_VALID;
}

static inline void msm_spi_udelay(unsigned long delay_usecs)
{
	/*
	 * For smaller values of delay, context switch time
	 * would negate the usage of usleep
	 */
	if (delay_usecs > 20)
		usleep_range(delay_usecs, delay_usecs);
	else if (delay_usecs)
		udelay(delay_usecs);
}

static inline int msm_spi_wait_valid(struct msm_spi *dd)
{
	unsigned long delay = 0;
	unsigned long timeout = 0;

	if (dd->clock_speed == 0)
		return -EINVAL;
	/*
	 * Based on the SPI clock speed, sufficient time
	 * should be given for the SPI state transition
	 * to occur
	 */
	delay = (10 * USEC_PER_SEC) / dd->clock_speed;
	/*
	 * For small delay values, the default timeout would
	 * be one jiffy
	 */
	if (delay < SPI_DELAY_THRESHOLD)
		delay = SPI_DELAY_THRESHOLD;

	/* Adding one to round off to the nearest jiffy */
	timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
	while (!msm_spi_is_valid_state(dd)) {
		if (time_after(jiffies, timeout)) {
			if (!msm_spi_is_valid_state(dd)) {
				if (dd->cur_msg)
					dd->cur_msg->status = -EIO;
				dev_err(dd->dev, "%s: SPI operational state "
					"not valid\n", __func__);
				return -ETIMEDOUT;
			} else
				return 0;
		}
		msm_spi_udelay(delay);
	}
	return 0;
}

static inline int msm_spi_set_state(struct msm_spi *dd,
				    enum msm_spi_state state)
{
	enum msm_spi_state cur_state;
	if (msm_spi_wait_valid(dd))
		return -EIO;
	cur_state = readl_relaxed(dd->base + SPI_STATE);
	/* Per spec:
	   For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
	if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
			(state == SPI_OP_STATE_RESET)) {
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
	} else {
		writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
				dd->base + SPI_STATE);
	}
	if (msm_spi_wait_valid(dd))
		return -EIO;

	return 0;
}

static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n)
{
	*config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);

	if (n != (*config & SPI_CFG_N))
		*config = (*config & ~SPI_CFG_N) | n;

	if ((dd->mode == SPI_DMOV_MODE) && (!dd->read_len)) {
		if (dd->read_buf == NULL)
			*config |= SPI_NO_INPUT;
		if (dd->write_buf == NULL)
			*config |= SPI_NO_OUTPUT;
	}
}

static void msm_spi_set_config(struct msm_spi *dd, int bpw)
{
	u32 spi_config;

	spi_config = readl_relaxed(dd->base + SPI_CONFIG);

	if (dd->cur_msg->spi->mode & SPI_CPHA)
		spi_config &= ~SPI_CFG_INPUT_FIRST;
	else
		spi_config |= SPI_CFG_INPUT_FIRST;
	if (dd->cur_msg->spi->mode & SPI_LOOP)
		spi_config |= SPI_CFG_LOOPBACK;
	else
		spi_config &= ~SPI_CFG_LOOPBACK;
	msm_spi_add_configs(dd, &spi_config, bpw-1);
	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
	msm_spi_set_qup_config(dd, bpw);
}

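/*
 * Build the Data Mover command list for the current chunk: box commands
 * move whole bursts directly to/from the client buffers, while any
 * unaligned tail (tx_unaligned_len/rx_unaligned_len) is staged through the
 * driver's padding buffers via the single_pad descriptors.
 */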
static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
{
	dmov_box *box;
	int bytes_to_send, bytes_sent;
	int tx_num_rows, rx_num_rows;
	u32 num_transfers;

	atomic_set(&dd->rx_irq_called, 0);
	atomic_set(&dd->tx_irq_called, 0);
	if (dd->write_len && !dd->read_len) {
		/* WR-WR transfer */
		bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
		dd->write_buf = dd->temp_buf;
	} else {
		bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
		/* For WR-RD transfer, bytes_sent can be negative */
		if (bytes_sent < 0)
			bytes_sent = 0;
	}
	/* We'll send in chunks of SPI_MAX_LEN if larger than
	 * 4K bytes for targets that have only 12 bits in the
	 * QUP_MAX_OUTPUT_CNT register. If the target supports
	 * more than 12 bits, then we send the data in chunks of
	 * the infinite_mode value that is defined in the
	 * corresponding board file.
	 */
	if (!dd->pdata->infinite_mode)
		dd->max_trfr_len = SPI_MAX_LEN;
	else
		dd->max_trfr_len = (dd->pdata->infinite_mode) *
					(dd->bytes_per_word);

	bytes_to_send = min_t(u32, dd->tx_bytes_remaining,
				dd->max_trfr_len);

	num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
	dd->tx_unaligned_len = bytes_to_send % dd->output_burst_size;
	dd->rx_unaligned_len = bytes_to_send % dd->input_burst_size;
	tx_num_rows = bytes_to_send / dd->output_burst_size;
	rx_num_rows = bytes_to_send / dd->input_burst_size;

	dd->mode = SPI_DMOV_MODE;

	if (tx_num_rows) {
		/* src in 16 MSB, dst in 16 LSB */
		box = &dd->tx_dmov_cmd->box;
		box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
		box->src_dst_len
			= (dd->output_burst_size << 16) | dd->output_burst_size;
		box->num_rows = (tx_num_rows << 16) | tx_num_rows;
		box->row_offset = (dd->output_burst_size << 16) | 0;

		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, box));
	} else {
		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, single_pad));
	}

	if (rx_num_rows) {
		/* src in 16 MSB, dst in 16 LSB */
		box = &dd->rx_dmov_cmd->box;
		box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
		box->src_dst_len
			= (dd->input_burst_size << 16) | dd->input_burst_size;
		box->num_rows = (rx_num_rows << 16) | rx_num_rows;
		box->row_offset = (0 << 16) | dd->input_burst_size;

		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, box));
	} else {
		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, single_pad));
	}

	if (!dd->tx_unaligned_len) {
		dd->tx_dmov_cmd->box.cmd |= CMD_LC;
	} else {
		dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
		u32 tx_offset = dd->cur_transfer->len - dd->tx_unaligned_len;

		if ((dd->multi_xfr) && (dd->read_len <= 0))
			tx_offset = dd->cur_msg_len - dd->tx_unaligned_len;

		dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;

		memset(dd->tx_padding, 0, dd->output_burst_size);
		if (dd->write_buf)
			memcpy(dd->tx_padding, dd->write_buf + tx_offset,
			       dd->tx_unaligned_len);

		tx_cmd->src = dd->tx_padding_dma;
		tx_cmd->len = dd->output_burst_size;
	}

	if (!dd->rx_unaligned_len) {
		dd->rx_dmov_cmd->box.cmd |= CMD_LC;
	} else {
		dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
		dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;

		memset(dd->rx_padding, 0, dd->input_burst_size);
		rx_cmd->dst = dd->rx_padding_dma;
		rx_cmd->len = dd->input_burst_size;
	}

	/* This also takes care of the padding dummy buf
	   Since this is set to the correct length, the
	   dummy bytes won't be actually sent */
	if (dd->multi_xfr) {
		u32 write_transfers = 0;
		u32 read_transfers = 0;

		if (dd->write_len > 0) {
			write_transfers = DIV_ROUND_UP(dd->write_len,
						       dd->bytes_per_word);
			writel_relaxed(write_transfers,
				       dd->base + SPI_MX_OUTPUT_COUNT);
		}
		if (dd->read_len > 0) {
			/*
			 * The read following a write transfer must take
			 * into account that the bytes pertaining to
			 * the write transfer need to be discarded
			 * before the actual read begins.
			 */
			read_transfers = DIV_ROUND_UP(dd->read_len +
						      dd->write_len,
						      dd->bytes_per_word);
			writel_relaxed(read_transfers,
				       dd->base + SPI_MX_INPUT_COUNT);
		}
	} else {
		if (dd->write_buf)
			writel_relaxed(num_transfers,
				       dd->base + SPI_MX_OUTPUT_COUNT);
		if (dd->read_buf)
			writel_relaxed(num_transfers,
				       dd->base + SPI_MX_INPUT_COUNT);
	}
}

static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
{
	dma_coherent_pre_ops();
	if (dd->write_buf)
		msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
	if (dd->read_buf)
		msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
}

/* The SPI core on targets that do not support infinite mode can send
   a maximum of 4K or 64K transfers, depending on the size of the
   MAX_OUTPUT_COUNT register. Therefore, we send the data in several
   chunks. Upon completion we send the next chunk, or complete the
   transfer if everything is finished. On targets that support
   infinite mode, we send all the bytes in a single chunk.
*/
static int msm_spi_dm_send_next(struct msm_spi *dd)
{
	/* By now we should have sent all the bytes in FIFO mode,
	 * However to make things right, we'll check anyway.
	 */
	if (dd->mode != SPI_DMOV_MODE)
		return 0;

	/* On targets that do not support infinite mode,
	   we need to send more chunks if we sent the maximum last time */
	if (dd->tx_bytes_remaining > dd->max_trfr_len) {
		dd->tx_bytes_remaining -= dd->max_trfr_len;
		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
			return 0;
		dd->read_len = dd->write_len = 0;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	} else if (dd->read_len && dd->write_len) {
		dd->tx_bytes_remaining -= dd->cur_transfer->len;
		if (list_is_last(&dd->cur_transfer->transfer_list,
					&dd->cur_msg->transfers))
			return 0;
		get_next_transfer(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
			return 0;
		dd->tx_bytes_remaining = dd->read_len + dd->write_len;
		dd->read_buf = dd->temp_buf;
		dd->read_len = dd->write_len = -1;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	}
	return 0;
}

static inline void msm_spi_ack_transfer(struct msm_spi *dd)
{
	writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
		       SPI_OP_MAX_OUTPUT_DONE_FLAG,
		       dd->base + SPI_OPERATIONAL);
	/* Ensure done flag was cleared before proceeding further */
	mb();
}

/* Figure out which irq occurred and call the relevant functions */
static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
{
	u32 op, ret = IRQ_NONE;
	struct msm_spi *dd = dev_id;

	if (pm_runtime_suspended(dd->dev)) {
		dev_warn(dd->dev, "QUP: pm runtime suspend, irq:%d\n", irq);
		return ret;
	}
	if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
	    readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
		struct spi_master *master = dev_get_drvdata(dd->dev);
		ret |= msm_spi_error_irq(irq, master);
	}

	op = readl_relaxed(dd->base + SPI_OPERATIONAL);
	if (op & SPI_OP_INPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_input_irq(irq, dev_id);
	}

	if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_output_irq(irq, dev_id);
	}

	if (dd->done) {
		complete(&dd->transfer_complete);
		dd->done = 0;
	}
	return ret;
}

static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_rx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
		if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
		    (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
			msm_spi_ack_transfer(dd);
			if (dd->rx_unaligned_len == 0) {
				if (atomic_inc_return(&dd->rx_irq_called) == 1)
					return IRQ_HANDLED;
			}
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	if (dd->mode == SPI_FIFO_MODE) {
		while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
			SPI_OP_IP_FIFO_NOT_EMPTY) &&
			(dd->rx_bytes_remaining > 0)) {
			msm_spi_read_word_from_fifo(dd);
		}
		if (dd->rx_bytes_remaining == 0)
			msm_spi_complete(dd);
	}

	return IRQ_HANDLED;
}

static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
{
	u32 word;
	u8 byte;
	int i;

	word = 0;
	if (dd->write_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			     dd->tx_bytes_remaining; i++) {
			dd->tx_bytes_remaining--;
			byte = *dd->write_buf++;
			word |= (byte << (BITS_PER_BYTE * (3 - i)));
		}
	} else
		if (dd->tx_bytes_remaining > dd->bytes_per_word)
			dd->tx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->tx_bytes_remaining = 0;
	dd->write_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->tx_bytes_remaining)
			dd->write_xfr_cnt = 0;
		else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
						dd->write_len) {
			struct spi_transfer *t = dd->cur_tx_transfer;
			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->write_buf = t->tx_buf;
				dd->write_len = t->len;
				dd->write_xfr_cnt = 0;
				dd->cur_tx_transfer = t;
			}
		}
	}
	writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
}

static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
{
	int count = 0;

	while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
	       !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
		SPI_OP_OUTPUT_FIFO_FULL)) {
		msm_spi_write_word_to_fifo(dd);
		count++;
	}
}

static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_tx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		/* TX_ONLY transaction is handled here
		   This is the only place we send complete at tx and not rx */
		if (dd->read_buf == NULL &&
		    readl_relaxed(dd->base + SPI_OPERATIONAL) &
		    SPI_OP_MAX_OUTPUT_DONE_FLAG) {
			msm_spi_ack_transfer(dd);
			if (atomic_inc_return(&dd->tx_irq_called) == 1)
				return IRQ_HANDLED;
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	/* Output FIFO is empty. Transmit any outstanding write data. */
	if (dd->mode == SPI_FIFO_MODE)
		msm_spi_write_rmn_to_fifo(dd);

	return IRQ_HANDLED;
}

static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct msm_spi *dd = spi_master_get_devdata(master);
	u32 spi_err;

	spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
	if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output overrun error\n");
	if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI input underrun error\n");
	if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output underrun error\n");
	msm_spi_get_clk_err(dd, &spi_err);
	if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock overrun error\n");
	if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock underrun error\n");
	msm_spi_clear_error_flags(dd);
	msm_spi_ack_clk_err(dd);
	/* Ensure clearing of QUP_ERROR_FLAGS was completed */
	mb();
	return IRQ_HANDLED;
}

static int msm_spi_map_dma_buffers(struct msm_spi *dd)
{
	struct device *dev;
	struct spi_transfer *first_xfr;
	struct spi_transfer *nxt_xfr = NULL;
	void *tx_buf, *rx_buf;
	unsigned tx_len, rx_len;
	int ret = -EINVAL;

	dev = &dd->cur_msg->spi->dev;
	first_xfr = dd->cur_transfer;
	tx_buf = (void *)first_xfr->tx_buf;
	rx_buf = first_xfr->rx_buf;
	tx_len = rx_len = first_xfr->len;

	/*
	 * For WR-WR and WR-RD transfers, we allocate our own temporary
	 * buffer and copy the data to/from the client buffers.
	 */
	if (dd->multi_xfr) {
		dd->temp_buf = kzalloc(dd->cur_msg_len,
				       GFP_KERNEL | __GFP_DMA);
		if (!dd->temp_buf)
			return -ENOMEM;
		nxt_xfr = list_entry(first_xfr->transfer_list.next,
				     struct spi_transfer, transfer_list);

		if (dd->write_len && !dd->read_len) {
			if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
				goto error;

			memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
			memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
			       nxt_xfr->len);
			tx_buf = dd->temp_buf;
			tx_len = dd->cur_msg_len;
		} else {
			if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
				goto error;

			rx_buf = dd->temp_buf;
			rx_len = dd->cur_msg_len;
		}
	}
	if (tx_buf != NULL) {
		first_xfr->tx_dma = dma_map_single(dev, tx_buf,
						   tx_len, DMA_TO_DEVICE);
		if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'T', tx_len);
			ret = -ENOMEM;
			goto error;
		}
	}
	if (rx_buf != NULL) {
		dma_addr_t dma_handle;
		dma_handle = dma_map_single(dev, rx_buf,
					    rx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(NULL, dma_handle)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'R', rx_len);
			if (tx_buf != NULL)
				dma_unmap_single(NULL, first_xfr->tx_dma,
						 tx_len, DMA_TO_DEVICE);
			ret = -ENOMEM;
			goto error;
		}
		if (dd->multi_xfr)
			nxt_xfr->rx_dma = dma_handle;
		else
			first_xfr->rx_dma = dma_handle;
	}
	return 0;

error:
	kfree(dd->temp_buf);
	dd->temp_buf = NULL;
	return ret;
}

static void msm_spi_unmap_dma_buffers(struct msm_spi *dd)
{
	struct device *dev;
	u32 offset;

	dev = &dd->cur_msg->spi->dev;
	if (dd->cur_msg->is_dma_mapped)
		goto unmap_end;

	if (dd->multi_xfr) {
		if (dd->write_len && !dd->read_len) {
			dma_unmap_single(dev,
					 dd->cur_transfer->tx_dma,
					 dd->cur_msg_len,
					 DMA_TO_DEVICE);
		} else {
			struct spi_transfer *prev_xfr;
			prev_xfr = list_entry(
				   dd->cur_transfer->transfer_list.prev,
				   struct spi_transfer,
				   transfer_list);
			if (dd->cur_transfer->rx_buf) {
				dma_unmap_single(dev,
						 dd->cur_transfer->rx_dma,
						 dd->cur_msg_len,
						 DMA_FROM_DEVICE);
			}
			if (prev_xfr->tx_buf) {
				dma_unmap_single(dev,
						 prev_xfr->tx_dma,
						 prev_xfr->len,
						 DMA_TO_DEVICE);
			}
			if (dd->rx_unaligned_len && dd->read_buf) {
				offset = dd->cur_msg_len - dd->rx_unaligned_len;
				dma_coherent_post_ops();
				memcpy(dd->read_buf + offset, dd->rx_padding,
				       dd->rx_unaligned_len);
				memcpy(dd->cur_transfer->rx_buf,
				       dd->read_buf + prev_xfr->len,
				       dd->cur_transfer->len);
			}
		}
		kfree(dd->temp_buf);
		dd->temp_buf = NULL;
		return;
	} else {
		if (dd->cur_transfer->rx_buf)
			dma_unmap_single(dev, dd->cur_transfer->rx_dma,
					 dd->cur_transfer->len,
					 DMA_FROM_DEVICE);
		if (dd->cur_transfer->tx_buf)
			dma_unmap_single(dev, dd->cur_transfer->tx_dma,
					 dd->cur_transfer->len,
					 DMA_TO_DEVICE);
	}

unmap_end:
	/* If we padded the transfer, we copy it from the padding buf */
	if (dd->rx_unaligned_len && dd->read_buf) {
		offset = dd->cur_transfer->len - dd->rx_unaligned_len;
		dma_coherent_post_ops();
		memcpy(dd->read_buf + offset, dd->rx_padding,
		       dd->rx_unaligned_len);
	}
}

/**
 * msm_use_dm - decides whether to use data mover for this
 *		transfer
 * @dd:       device
 * @tr:       transfer
 *
 * Start using DM if:
 * 1. Transfer is longer than 3*block size.
 * 2. Buffers should be aligned to cache line.
 * 3. For WR-RD or WR-WR transfers, if condition (1) and (2) above are met.
 */
static inline int msm_use_dm(struct msm_spi *dd, struct spi_transfer *tr,
			     u8 bpw)
{
	u32 cache_line = dma_get_cache_alignment();

	if (!dd->use_dma)
		return 0;

	if (dd->cur_msg_len < 3*dd->input_block_size)
		return 0;

	if (dd->multi_xfr && !dd->read_len && !dd->write_len)
		return 0;

	if (tr->tx_buf) {
		if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
			return 0;
	}
	if (tr->rx_buf) {
		if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
			return 0;
	}

	if (tr->cs_change &&
	    ((bpw != 8) && (bpw != 16) && (bpw != 32)))
		return 0;
	return 1;
}

static void msm_spi_process_transfer(struct msm_spi *dd)
{
	u8 bpw;
	u32 spi_ioc;
	u32 spi_iom;
	u32 spi_ioc_orig;
	u32 max_speed;
	u32 chip_select;
	u32 read_count;
	u32 timeout;
	u32 int_loopback = 0;

	dd->tx_bytes_remaining = dd->cur_msg_len;
	dd->rx_bytes_remaining = dd->cur_msg_len;
	dd->read_buf = dd->cur_transfer->rx_buf;
	dd->write_buf = dd->cur_transfer->tx_buf;
	init_completion(&dd->transfer_complete);
	if (dd->cur_transfer->bits_per_word)
		bpw = dd->cur_transfer->bits_per_word;
	else
		if (dd->cur_msg->spi->bits_per_word)
			bpw = dd->cur_msg->spi->bits_per_word;
		else
			bpw = 8;
	dd->bytes_per_word = (bpw + 7) / 8;

	if (dd->cur_transfer->speed_hz)
		max_speed = dd->cur_transfer->speed_hz;
	else
		max_speed = dd->cur_msg->spi->max_speed_hz;
	if (!dd->clock_speed || max_speed != dd->clock_speed)
		msm_spi_clock_set(dd, max_speed);

	read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
	if (dd->cur_msg->spi->mode & SPI_LOOP)
		int_loopback = 1;
	if (int_loopback && dd->multi_xfr &&
			(read_count > dd->input_fifo_size)) {
		if (dd->read_len && dd->write_len)
			pr_err(
			"%s:Internal Loopback does not support > fifo size "
			"for write-then-read transactions\n",
				__func__);
		else if (dd->write_len && !dd->read_len)
			pr_err(
			"%s:Internal Loopback does not support > fifo size "
			"for write-then-write transactions\n",
				__func__);
		return;
	}
	if (!msm_use_dm(dd, dd->cur_transfer, bpw)) {
		dd->mode = SPI_FIFO_MODE;
		if (dd->multi_xfr) {
			dd->read_len = dd->cur_transfer->len;
			dd->write_len = dd->cur_transfer->len;
		}
		/* read_count cannot exceed fifo_size, and only one READ COUNT
		   interrupt is generated per transaction, so for transactions
		   larger than fifo size READ COUNT must be disabled.
		   For those transactions we usually move to Data Mover mode.
		*/
		if (read_count <= dd->input_fifo_size) {
			writel_relaxed(read_count,
				       dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, read_count);
		} else {
			writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, 0);
		}
	} else {
		dd->mode = SPI_DMOV_MODE;
		if (dd->write_len && dd->read_len) {
			dd->tx_bytes_remaining = dd->write_len;
			dd->rx_bytes_remaining = dd->read_len;
		}
	}

	/* Write mode - fifo or data mover */
	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
	spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
	spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
	spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
	/* Turn on packing for data mover */
	if (dd->mode == SPI_DMOV_MODE)
		spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
	else
		spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
	writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);

	msm_spi_set_config(dd, bpw);

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (dd->cur_msg->spi->mode & SPI_CPOL)
		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
	chip_select = dd->cur_msg->spi->chip_select << 2;
	if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
		spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
	if (!dd->cur_transfer->cs_change)
		spi_ioc |= SPI_IO_C_MX_CS_MODE;
	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);

	if (dd->mode == SPI_DMOV_MODE) {
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
	}
	/* The output fifo interrupt handler will handle all writes after
	   the first. Restricting this to one write avoids contention
	   issues and race conditions between this thread and the int handler
	*/
	else if (dd->mode == SPI_FIFO_MODE) {
		if (msm_spi_prepare_for_write(dd))
			goto transfer_end;
		msm_spi_start_write(dd, read_count);
	}

	/* Only enter the RUN state after the first word is written into
	   the output FIFO. Otherwise, the output FIFO EMPTY interrupt
	   might fire before the first word is written resulting in a
	   possible race condition.
	*/
	if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
		goto transfer_end;

	timeout = 100 * msecs_to_jiffies(
			DIV_ROUND_UP(dd->cur_msg_len * 8,
				DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));

	/* Assume success, this might change later upon transaction result */
	dd->cur_msg->status = 0;
	do {
		if (!wait_for_completion_timeout(&dd->transfer_complete,
						 timeout)) {
			dev_err(dd->dev, "%s: SPI transaction "
					 "timeout\n", __func__);
			dd->cur_msg->status = -EIO;
			if (dd->mode == SPI_DMOV_MODE) {
				msm_dmov_flush(dd->tx_dma_chan, 1);
				msm_dmov_flush(dd->rx_dma_chan, 1);
			}
			break;
		}
	} while (msm_spi_dm_send_next(dd));

	msm_spi_udelay(dd->cur_transfer->delay_usecs);
transfer_end:
	if (dd->mode == SPI_DMOV_MODE)
		msm_spi_unmap_dma_buffers(dd);
	dd->mode = SPI_MODE_NONE;

	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
		       dd->base + SPI_IO_CONTROL);
}

static void get_transfer_length(struct msm_spi *dd)
{
	struct spi_transfer *tr;
	int num_xfrs = 0;
	int readlen = 0;
	int writelen = 0;

	dd->cur_msg_len = 0;
	dd->multi_xfr = 0;
	dd->read_len = dd->write_len = 0;

	list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
		if (tr->tx_buf)
			writelen += tr->len;
		if (tr->rx_buf)
			readlen += tr->len;
		dd->cur_msg_len += tr->len;
		num_xfrs++;
	}

	if (num_xfrs == 2) {
		struct spi_transfer *first_xfr = dd->cur_transfer;

		dd->multi_xfr = 1;
		tr = list_entry(first_xfr->transfer_list.next,
				struct spi_transfer,
				transfer_list);
		/*
		 * We update dd->read_len and dd->write_len only
		 * for WR-WR and WR-RD transfers.
		 */
		if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
			if (((tr->tx_buf) && (!tr->rx_buf)) ||
			    ((!tr->tx_buf) && (tr->rx_buf))) {
				dd->read_len = readlen;
				dd->write_len = writelen;
			}
		}
	} else if (num_xfrs > 1)
		dd->multi_xfr = 1;
}

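/*
 * Walk the remaining transfers of the message and fold consecutive ones
 * that share the same cs_change setting into one logical transfer;
 * cur_msg_len accumulates their combined length and the return value is
 * the number of transfers grouped.
 */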
static inline int combine_transfers(struct msm_spi *dd)
{
	struct spi_transfer *t = dd->cur_transfer;
	struct spi_transfer *nxt;
	int xfrs_grped = 1;

	dd->cur_msg_len = dd->cur_transfer->len;
	while (t->transfer_list.next != &dd->cur_msg->transfers) {
		nxt = list_entry(t->transfer_list.next,
				 struct spi_transfer,
				 transfer_list);
		if (t->cs_change != nxt->cs_change)
			return xfrs_grped;
		dd->cur_msg_len += nxt->len;
		xfrs_grped++;
		t = nxt;
	}
	return xfrs_grped;
}

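/*
 * Set or clear SPI_IO_C_FORCE_CS, which is used here to keep chip-select
 * asserted across back-to-back transfers; the register is only written
 * when the value actually changes.
 */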
static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
{
	u32 spi_ioc;
	u32 spi_ioc_orig;

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (set_flag)
		spi_ioc |= SPI_IO_C_FORCE_CS;
	else
		spi_ioc &= ~SPI_IO_C_FORCE_CS;

	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
}

static void msm_spi_process_message(struct msm_spi *dd)
{
	int xfrs_grped = 0;
	int cs_num;
	int rc;
	bool xfer_delay = false;
	struct spi_transfer *tr;

	dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
	cs_num = dd->cur_msg->spi->chip_select;
	if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
		(!(dd->cs_gpios[cs_num].valid)) &&
		(dd->cs_gpios[cs_num].gpio_num >= 0)) {
		rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
				spi_cs_rsrcs[cs_num]);
		if (rc) {
			dev_err(dd->dev, "gpio_request for pin %d failed with "
				"error %d\n", dd->cs_gpios[cs_num].gpio_num,
				rc);
			return;
		}
		dd->cs_gpios[cs_num].valid = 1;
	}

	list_for_each_entry(tr,
				&dd->cur_msg->transfers,
				transfer_list) {
		if (tr->delay_usecs) {
			dev_info(dd->dev, "SPI slave requests delay per txn :%d",
					tr->delay_usecs);
			xfer_delay = true;
			break;
		}
	}

	/* Don't combine xfers if delay is needed after every xfer */
	if (dd->qup_ver || xfer_delay) {
		if (dd->qup_ver)
			write_force_cs(dd, 0);
		list_for_each_entry(dd->cur_transfer,
				    &dd->cur_msg->transfers,
				    transfer_list) {
			struct spi_transfer *t = dd->cur_transfer;
			struct spi_transfer *nxt;

			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				nxt = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);

				if (dd->qup_ver &&
					t->cs_change == nxt->cs_change)
					write_force_cs(dd, 1);
				else if (dd->qup_ver)
					write_force_cs(dd, 0);
			}

			dd->cur_msg_len = dd->cur_transfer->len;
			msm_spi_process_transfer(dd);
		}
	} else {
		dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
						    struct spi_transfer,
						    transfer_list);
		get_transfer_length(dd);
		if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
			/*
			 * Handling of multi-transfers.
			 * FIFO mode is used by default
			 */
			list_for_each_entry(dd->cur_transfer,
					    &dd->cur_msg->transfers,
					    transfer_list) {
				if (!dd->cur_transfer->len)
					goto error;
				if (xfrs_grped) {
					xfrs_grped--;
					continue;
				} else {
					dd->read_len = dd->write_len = 0;
					xfrs_grped = combine_transfers(dd);
				}

				dd->cur_tx_transfer = dd->cur_transfer;
				dd->cur_rx_transfer = dd->cur_transfer;
				msm_spi_process_transfer(dd);
				xfrs_grped--;
			}
		} else {
			/* Handling of a single transfer or
			 * WR-WR or WR-RD transfers
			 */
			if ((!dd->cur_msg->is_dma_mapped) &&
			    (msm_use_dm(dd, dd->cur_transfer,
					dd->cur_transfer->bits_per_word))) {
				/* Mapping of DMA buffers */
				int ret = msm_spi_map_dma_buffers(dd);
				if (ret < 0) {
					dd->cur_msg->status = ret;
					goto error;
				}
			}

			dd->cur_tx_transfer = dd->cur_transfer;
			dd->cur_rx_transfer = dd->cur_transfer;
			msm_spi_process_transfer(dd);
		}
	}

	return;

error:
	if (dd->cs_gpios[cs_num].valid) {
		gpio_free(dd->cs_gpios[cs_num].gpio_num);
		dd->cs_gpios[cs_num].valid = 0;
	}
}

/* workqueue - pull messages from queue & process */
static void msm_spi_workq(struct work_struct *work)
{
	struct msm_spi *dd =
		container_of(work, struct msm_spi, work_data);
	unsigned long flags;
	u32 status_error = 0;

	pm_runtime_get_sync(dd->dev);

	mutex_lock(&dd->core_lock);

	/*
	 * Counter-part of system-suspend when runtime-pm is not enabled.
	 * This way, resume can be left empty and device will be put in
	 * active mode only if client requests anything on the bus
	 */
	if (!pm_runtime_enabled(dd->dev))
		msm_spi_pm_resume_runtime(dd->dev);

	if (dd->use_rlock)
		remote_mutex_lock(&dd->r_lock);

	if (!msm_spi_is_valid_state(dd)) {
		dev_err(dd->dev, "%s: SPI operational state not valid\n",
			__func__);
		status_error = 1;
	}

	spin_lock_irqsave(&dd->queue_lock, flags);
	dd->transfer_pending = 1;
	while (!list_empty(&dd->queue)) {
		dd->cur_msg = list_entry(dd->queue.next,
					 struct spi_message, queue);
		list_del_init(&dd->cur_msg->queue);
		spin_unlock_irqrestore(&dd->queue_lock, flags);
		if (status_error)
			dd->cur_msg->status = -EIO;
		else
			msm_spi_process_message(dd);
		if (dd->cur_msg->complete)
			dd->cur_msg->complete(dd->cur_msg->context);
		spin_lock_irqsave(&dd->queue_lock, flags);
	}
	dd->transfer_pending = 0;
	spin_unlock_irqrestore(&dd->queue_lock, flags);

	if (dd->use_rlock)
		remote_mutex_unlock(&dd->r_lock);

	mutex_unlock(&dd->core_lock);

	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);

	/* If needed, this can be done after the current message is complete,
	   and work can be continued upon resume. No motivation for now. */
	if (dd->suspended)
		wake_up_interruptible(&dd->continue_suspend);
}

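/*
 * spi_master transfer() hook: validate each transfer's speed, word size
 * and buffers, then queue the message and kick the workqueue; the actual
 * processing happens later in msm_spi_workq() context.
 */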
static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct msm_spi *dd;
	unsigned long flags;
	struct spi_transfer *tr;

	dd = spi_master_get_devdata(spi->master);

	if (list_empty(&msg->transfers) || !msg->complete)
		return -EINVAL;

	list_for_each_entry(tr, &msg->transfers, transfer_list) {
		/* Check message parameters */
		if (tr->speed_hz > dd->pdata->max_clock_speed ||
		    (tr->bits_per_word &&
		     (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
		    (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
			dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw "
					   "tx=%p, rx=%p\n",
					   tr->speed_hz, tr->bits_per_word,
					   tr->tx_buf, tr->rx_buf);
			return -EINVAL;
		}
	}

	spin_lock_irqsave(&dd->queue_lock, flags);
	list_add_tail(&msg->queue, &dd->queue);
	spin_unlock_irqrestore(&dd->queue_lock, flags);
	queue_work(dd->workqueue, &dd->work_data);
	return 0;
}

1492static int msm_spi_setup(struct spi_device *spi)
1493{
1494 struct msm_spi *dd;
1495 int rc = 0;
1496 u32 spi_ioc;
1497 u32 spi_config;
1498 u32 mask;
1499
1500 if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
1501 dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
1502 __func__, spi->bits_per_word);
1503 rc = -EINVAL;
1504 }
1505 if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
1506 dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
1507 __func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
1508 rc = -EINVAL;
1509 }
1510
1511 if (rc)
1512 goto err_setup_exit;
1513
1514 dd = spi_master_get_devdata(spi->master);
1515
Alok Chauhan2a647c92013-03-12 18:34:43 +05301516 pm_runtime_get_sync(dd->dev);
1517
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001518 mutex_lock(&dd->core_lock);
Alok Chauhan2a647c92013-03-12 18:34:43 +05301519
1520 /* Counter-part of system-suspend when runtime-pm is not enabled. */
1521 if (!pm_runtime_enabled(dd->dev))
1522 msm_spi_pm_resume_runtime(dd->dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001523
1524 if (dd->use_rlock)
1525 remote_mutex_lock(&dd->r_lock);
1526
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001527 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1528 mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
1529 if (spi->mode & SPI_CS_HIGH)
1530 spi_ioc |= mask;
1531 else
1532 spi_ioc &= ~mask;
1533 if (spi->mode & SPI_CPOL)
1534 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1535 else
1536 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1537
1538 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1539
1540 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
1541 if (spi->mode & SPI_LOOP)
1542 spi_config |= SPI_CFG_LOOPBACK;
1543 else
1544 spi_config &= ~SPI_CFG_LOOPBACK;
1545 if (spi->mode & SPI_CPHA)
1546 spi_config &= ~SPI_CFG_INPUT_FIRST;
1547 else
1548 spi_config |= SPI_CFG_INPUT_FIRST;
1549 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
1550
1551 /* Ensure previous write completed before disabling the clocks */
1552 mb();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001553
1554 if (dd->use_rlock)
1555 remote_mutex_unlock(&dd->r_lock);
Alok Chauhan2a647c92013-03-12 18:34:43 +05301556
1557 /* Counter-part of system-resume when runtime-pm is not enabled. */
1558 if (!pm_runtime_enabled(dd->dev))
1559 msm_spi_pm_suspend_runtime(dd->dev);
1560
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001561 mutex_unlock(&dd->core_lock);
Alok Chauhan2a647c92013-03-12 18:34:43 +05301562
1563 pm_runtime_mark_last_busy(dd->dev);
1564 pm_runtime_put_autosuspend(dd->dev);
1565
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001566err_setup_exit:
1567 return rc;
1568}
1569
1570#ifdef CONFIG_DEBUG_FS
1571static int debugfs_iomem_x32_set(void *data, u64 val)
1572{
1573 writel_relaxed(val, data);
1574 /* Ensure the previous write completed. */
1575 mb();
1576 return 0;
1577}
1578
1579static int debugfs_iomem_x32_get(void *data, u64 *val)
1580{
1581 *val = readl_relaxed(data);
1582 /* Ensure the previous read completed. */
1583 mb();
1584 return 0;
1585}
1586
1587DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
1588 debugfs_iomem_x32_set, "0x%08llx\n");
1589
1590static void spi_debugfs_init(struct msm_spi *dd)
1591{
1592 dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
1593 if (dd->dent_spi) {
1594 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001595
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001596 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
1597 dd->debugfs_spi_regs[i] =
1598 debugfs_create_file(
1599 debugfs_spi_regs[i].name,
1600 debugfs_spi_regs[i].mode,
1601 dd->dent_spi,
1602 dd->base + debugfs_spi_regs[i].offset,
1603 &fops_iomem_x32);
1604 }
1605 }
1606}
1607
1608static void spi_debugfs_exit(struct msm_spi *dd)
1609{
1610 if (dd->dent_spi) {
1611 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001612
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001613 debugfs_remove_recursive(dd->dent_spi);
1614 dd->dent_spi = NULL;
1615 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
1616 dd->debugfs_spi_regs[i] = NULL;
1617 }
1618}
1619#else
1620static void spi_debugfs_init(struct msm_spi *dd) {}
1621static void spi_debugfs_exit(struct msm_spi *dd) {}
1622#endif
1623
1624/* ===Device attributes begin=== */
1625static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
1626 char *buf)
1627{
1628 struct spi_master *master = dev_get_drvdata(dev);
1629 struct msm_spi *dd = spi_master_get_devdata(master);
1630
1631 return snprintf(buf, PAGE_SIZE,
1632 "Device %s\n"
1633 "rx fifo_size = %d spi words\n"
1634 "tx fifo_size = %d spi words\n"
1635 "use_dma ? %s\n"
1636 "rx block size = %d bytes\n"
1637 "tx block size = %d bytes\n"
Alok Chauhanc27843e2013-02-15 16:04:20 +05301638 "input burst size = %d bytes\n"
1639 "output burst size = %d bytes\n"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001640 "DMA configuration:\n"
1641 "tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
1642 "--statistics--\n"
1643 "Rx isrs = %d\n"
1644 "Tx isrs = %d\n"
1645 "DMA error = %d\n"
1646 "--debug--\n"
1647 "NA yet\n",
1648 dev_name(dev),
1649 dd->input_fifo_size,
1650 dd->output_fifo_size,
1651 dd->use_dma ? "yes" : "no",
1652 dd->input_block_size,
1653 dd->output_block_size,
Alok Chauhanc27843e2013-02-15 16:04:20 +05301654 dd->input_burst_size,
1655 dd->output_burst_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001656 dd->tx_dma_chan,
1657 dd->rx_dma_chan,
1658 dd->tx_dma_crci,
1659 dd->rx_dma_crci,
1660 dd->stat_rx + dd->stat_dmov_rx,
1661 dd->stat_tx + dd->stat_dmov_tx,
1662 dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
1663 );
1664}
1665
1666/* Reset statistics on write */
1667static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
1668 const char *buf, size_t count)
1669{
1670 struct msm_spi *dd = dev_get_drvdata(dev);
1671 dd->stat_rx = 0;
1672 dd->stat_tx = 0;
1673 dd->stat_dmov_rx = 0;
1674 dd->stat_dmov_tx = 0;
1675 dd->stat_dmov_rx_err = 0;
1676 dd->stat_dmov_tx_err = 0;
1677 return count;
1678}
1679
1680static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
1681
1682static struct attribute *dev_attrs[] = {
1683 &dev_attr_stats.attr,
1684 NULL,
1685};
1686
1687static struct attribute_group dev_attr_grp = {
1688 .attrs = dev_attrs,
1689};
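/*
 * Reading the 'stats' attribute dumps the FIFO/DMA configuration and the
 * interrupt/error counters formatted in show_stats() above; writing any
 * value to it clears the counters (see set_stats()).  The attribute is
 * created on the controller device in probe, so it typically appears as
 * .../spi_qsd.N/stats in sysfs, though the exact path depends on how the
 * platform device is registered.
 */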
1690/* ===Device attributes end=== */
1691
1692/**
1693 * spi_dmov_tx_complete_func - DataMover tx completion callback
1694 *
1695 * Executed in IRQ context (Data Mover's IRQ) with DataMover's
1696 * spinlock @msm_dmov_lock held.
1697 */
1698static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
1699 unsigned int result,
1700 struct msm_dmov_errdata *err)
1701{
1702 struct msm_spi *dd;
1703
1704 if (!(result & DMOV_RSLT_VALID)) {
1705		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p\n", result, cmd);
1706 return;
1707 }
1708 /* restore original context */
1709 dd = container_of(cmd, struct msm_spi, tx_hdr);
Kiran Gunda54eb06e2012-05-18 15:17:06 +05301710 if (result & DMOV_RSLT_DONE) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001711 dd->stat_dmov_tx++;
Kiran Gunda54eb06e2012-05-18 15:17:06 +05301712		if (atomic_inc_return(&dd->tx_irq_called) == 1)
1713 return;
1714 complete(&dd->transfer_complete);
1715 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001716 /* Error or flush */
1717 if (result & DMOV_RSLT_ERROR) {
1718 dev_err(dd->dev, "DMA error (0x%08x)\n", result);
1719 dd->stat_dmov_tx_err++;
1720 }
1721 if (result & DMOV_RSLT_FLUSH) {
1722 /*
1723 * Flushing normally happens in process of
1724 * removing, when we are waiting for outstanding
1725 * DMA commands to be flushed.
1726 */
1727 dev_info(dd->dev,
1728 "DMA channel flushed (0x%08x)\n", result);
1729 }
1730 if (err)
1731 dev_err(dd->dev,
1732 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
1733 err->flush[0], err->flush[1], err->flush[2],
1734 err->flush[3], err->flush[4], err->flush[5]);
1735 dd->cur_msg->status = -EIO;
1736 complete(&dd->transfer_complete);
1737 }
1738}
1739
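/*
 * Note on the *_irq_called counters used above and below: on a successful
 * DMA result the callback only signals transfer_complete on the second
 * increment of its counter.  The first increment is presumably made by the
 * QUP interrupt handler (not shown in this part of the file), so a transfer
 * is considered done only once both the Data Mover and the QUP side have
 * reported completion.
 */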
1740/**
1741 * spi_dmov_rx_complete_func - DataMover rx completion callback
1742 *
1743 * Executed in IRQ context (Data Mover's IRQ) with
1744 * DataMover's spinlock @msm_dmov_lock held.
1745 */
1746static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
1747 unsigned int result,
1748 struct msm_dmov_errdata *err)
1749{
1750 struct msm_spi *dd;
1751
1752 if (!(result & DMOV_RSLT_VALID)) {
1753		pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)\n",
1754 result, cmd);
1755 return;
1756 }
1757 /* restore original context */
1758 dd = container_of(cmd, struct msm_spi, rx_hdr);
1759 if (result & DMOV_RSLT_DONE) {
1760 dd->stat_dmov_rx++;
1761 if (atomic_inc_return(&dd->rx_irq_called) == 1)
1762 return;
1763 complete(&dd->transfer_complete);
1764 } else {
1765		/* Error or flush */
1766 if (result & DMOV_RSLT_ERROR) {
1767 dev_err(dd->dev, "DMA error(0x%08x)\n", result);
1768 dd->stat_dmov_rx_err++;
1769 }
1770 if (result & DMOV_RSLT_FLUSH) {
1771 dev_info(dd->dev,
1772 "DMA channel flushed(0x%08x)\n", result);
1773 }
1774 if (err)
1775 dev_err(dd->dev,
1776 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
1777 err->flush[0], err->flush[1], err->flush[2],
1778 err->flush[3], err->flush[4], err->flush[5]);
1779 dd->cur_msg->status = -EIO;
1780 complete(&dd->transfer_complete);
1781 }
1782}
1783
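/*
 * get_chunk_size() returns the size of the single dma_alloc_coherent()
 * chunk used by msm_spi_init_dma() below: one spi_dmov_cmd (rounded up to
 * DM_BYTE_ALIGN) plus one padding buffer sized to the larger of the input
 * and output burst sizes (rounded up to a cache line), multiplied by two
 * to cover both the tx and rx directions.
 */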
Alok Chauhanc27843e2013-02-15 16:04:20 +05301784static inline u32 get_chunk_size(struct msm_spi *dd, int input_burst_size,
1785 int output_burst_size)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001786{
1787 u32 cache_line = dma_get_cache_alignment();
Alok Chauhanc27843e2013-02-15 16:04:20 +05301788 int burst_size = (input_burst_size > output_burst_size) ?
1789 input_burst_size : output_burst_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001790
1791 return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
Alok Chauhanc27843e2013-02-15 16:04:20 +05301792 roundup(burst_size, cache_line))*2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001793}
1794
1795static void msm_spi_teardown_dma(struct msm_spi *dd)
1796{
1797 int limit = 0;
1798
1799 if (!dd->use_dma)
1800 return;
1801
1802 while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001803 msm_dmov_flush(dd->tx_dma_chan, 1);
1804 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001805 msleep(10);
1806 }
1807
Alok Chauhanc27843e2013-02-15 16:04:20 +05301808 dma_free_coherent(NULL,
1809 get_chunk_size(dd, dd->input_burst_size, dd->output_burst_size),
1810 dd->tx_dmov_cmd,
1811 dd->tx_dmov_cmd_dma);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001812 dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
1813 dd->tx_padding = dd->rx_padding = NULL;
1814}
1815
1816static __init int msm_spi_init_dma(struct msm_spi *dd)
1817{
1818 dmov_box *box;
1819 u32 cache_line = dma_get_cache_alignment();
1820
1821	/* Allocate everything as one chunk, since the total is smaller than a page */
1822
1823	/* We pass a NULL device, since a real device would need coherent_dma_mask
1824	   set in its definition; we're okay with using the system pool */
Alok Chauhanc27843e2013-02-15 16:04:20 +05301825 dd->tx_dmov_cmd
1826 = dma_alloc_coherent(NULL,
1827 get_chunk_size(dd, dd->input_burst_size,
1828 dd->output_burst_size),
1829 &dd->tx_dmov_cmd_dma, GFP_KERNEL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001830 if (dd->tx_dmov_cmd == NULL)
1831 return -ENOMEM;
1832
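	/*
	 * Carve the coherent chunk allocated above into its four regions:
	 * [tx_dmov_cmd][rx_dmov_cmd][tx_padding][rx_padding], aligning the
	 * command structures to DM_BYTE_ALIGN and the padding buffers to the
	 * cache line size, mirroring the layout assumed by get_chunk_size().
	 */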
1833	/* DMA addresses should be 64 bit aligned */
1834 dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
1835 ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
1836 dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
1837 sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);
1838
1839 /* Buffers should be aligned to cache line */
1840 dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
1841 dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
1842 sizeof(struct spi_dmov_cmd), cache_line);
Alok Chauhanc27843e2013-02-15 16:04:20 +05301843 dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding +
1844 dd->output_burst_size), cache_line);
1845 dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->output_burst_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001846 cache_line);
1847
1848 /* Setup DM commands */
1849 box = &(dd->rx_dmov_cmd->box);
1850 box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
1851 box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
1852 dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
1853 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
1854 offsetof(struct spi_dmov_cmd, cmd_ptr));
1855 dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001856
1857 box = &(dd->tx_dmov_cmd->box);
1858 box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
1859 box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
1860 dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
1861 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
1862 offsetof(struct spi_dmov_cmd, cmd_ptr));
1863 dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001864
1865 dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
1866 CMD_DST_CRCI(dd->tx_dma_crci);
1867 dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
1868 SPI_OUTPUT_FIFO;
1869 dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
1870 CMD_SRC_CRCI(dd->rx_dma_crci);
1871 dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
1872 SPI_INPUT_FIFO;
1873
1874 /* Clear remaining activities on channel */
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001875 msm_dmov_flush(dd->tx_dma_chan, 1);
1876 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001877
1878 return 0;
1879}
1880
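/*
 * msm_spi_dt_to_pdata() builds the platform data from a device tree node.
 * A minimal node for this driver could look roughly like the sketch below
 * (the unit address and property values are illustrative only, not taken
 * from any particular board file; cs-gpios and infinite_mode may also be
 * supplied):
 *
 *	spi@<base> {
 *		compatible = "qcom,spi-qup-v2";
 *		cell-index = <0>;
 *		spi-max-frequency = <19200000>;
 *	};
 */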
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001881struct msm_spi_platform_data *msm_spi_dt_to_pdata(struct platform_device *pdev)
1882{
1883 struct device_node *node = pdev->dev.of_node;
1884 struct msm_spi_platform_data *pdata;
1885
1886 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1887 if (!pdata) {
1888 pr_err("Unable to allocate platform data\n");
1889 return NULL;
1890 }
1891
1892 of_property_read_u32(node, "spi-max-frequency",
1893 &pdata->max_clock_speed);
Kiran Gundae8f16742012-06-27 10:06:32 +05301894 of_property_read_u32(node, "infinite_mode",
1895 &pdata->infinite_mode);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001896
1897 return pdata;
1898}
1899
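/*
 * Probe sequence (roughly): parse DT or legacy platform data and GPIOs,
 * map the QUP/GSBI registers, pick up optional Data Mover channels and
 * CRCIs, create the workqueue, optionally set up the remote spinlock and
 * pm_qos request, enable the clocks long enough to configure the GSBI,
 * FIFOs, DMA command structures and error flags, then drop the clocks,
 * hook up the IRQs, enable autosuspend-based runtime PM and finally
 * register the SPI master plus the sysfs and debugfs entries.
 */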
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001900static int __init msm_spi_probe(struct platform_device *pdev)
1901{
1902 struct spi_master *master;
1903 struct msm_spi *dd;
1904 struct resource *resource;
1905 int rc = -ENXIO;
1906 int locked = 0;
1907 int i = 0;
1908 int clk_enabled = 0;
1909 int pclk_enabled = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001910 struct msm_spi_platform_data *pdata;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001911 enum of_gpio_flags flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001912
1913 master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
1914 if (!master) {
1915 rc = -ENOMEM;
1916 dev_err(&pdev->dev, "master allocation failed\n");
1917 goto err_probe_exit;
1918 }
1919
1920 master->bus_num = pdev->id;
1921 master->mode_bits = SPI_SUPPORTED_MODES;
1922 master->num_chipselect = SPI_NUM_CHIPSELECTS;
1923 master->setup = msm_spi_setup;
1924 master->transfer = msm_spi_transfer;
1925 platform_set_drvdata(pdev, master);
1926 dd = spi_master_get_devdata(master);
1927
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001928 if (pdev->dev.of_node) {
1929 dd->qup_ver = SPI_QUP_VERSION_BFAM;
1930 master->dev.of_node = pdev->dev.of_node;
1931 pdata = msm_spi_dt_to_pdata(pdev);
1932 if (!pdata) {
1933 rc = -ENOMEM;
1934 goto err_probe_exit;
1935 }
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001936
Kenneth Heitkeecc836b2012-08-11 20:53:01 -06001937 rc = of_property_read_u32(pdev->dev.of_node,
1938 "cell-index", &pdev->id);
1939 if (rc)
1940 dev_warn(&pdev->dev,
1941 "using default bus_num %d\n", pdev->id);
1942 else
1943 master->bus_num = pdev->id;
1944
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001945 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
1946 dd->spi_gpios[i] = of_get_gpio_flags(pdev->dev.of_node,
1947 i, &flags);
1948 }
1949
1950 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
1951 dd->cs_gpios[i].gpio_num = of_get_named_gpio_flags(
1952 pdev->dev.of_node, "cs-gpios",
1953 i, &flags);
1954 dd->cs_gpios[i].valid = 0;
1955 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001956 } else {
1957 pdata = pdev->dev.platform_data;
1958 dd->qup_ver = SPI_QUP_VERSION_NONE;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001959
1960 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
1961 resource = platform_get_resource(pdev, IORESOURCE_IO,
1962 i);
1963 dd->spi_gpios[i] = resource ? resource->start : -1;
1964 }
1965
1966 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
1967 resource = platform_get_resource(pdev, IORESOURCE_IO,
1968 i + ARRAY_SIZE(spi_rsrcs));
1969 dd->cs_gpios[i].gpio_num = resource ?
1970 resource->start : -1;
1971 dd->cs_gpios[i].valid = 0;
1972 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001973 }
1974
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001975 dd->pdata = pdata;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001976 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001977 if (!resource) {
1978 rc = -ENXIO;
1979 goto err_probe_res;
1980 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001981
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001982 dd->mem_phys_addr = resource->start;
1983 dd->mem_size = resource_size(resource);
1984
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001985 if (pdata) {
1986 if (pdata->dma_config) {
1987 rc = pdata->dma_config();
1988 if (rc) {
1989 dev_warn(&pdev->dev,
1990 "%s: DM mode not supported\n",
1991 __func__);
1992 dd->use_dma = 0;
1993 goto skip_dma_resources;
1994 }
1995 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001996 resource = platform_get_resource(pdev, IORESOURCE_DMA, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001997 if (resource) {
1998 dd->rx_dma_chan = resource->start;
1999 dd->tx_dma_chan = resource->end;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002000 resource = platform_get_resource(pdev, IORESOURCE_DMA,
2001 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002002 if (!resource) {
2003 rc = -ENXIO;
2004 goto err_probe_res;
2005 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002006
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002007 dd->rx_dma_crci = resource->start;
2008 dd->tx_dma_crci = resource->end;
2009 dd->use_dma = 1;
2010 master->dma_alignment = dma_get_cache_alignment();
2011 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002012 }
2013
Alok Chauhanb5f53792012-08-22 19:54:45 +05302014skip_dma_resources:
Harini Jayaramane4c06192011-09-28 16:26:39 -06002015
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002016 spin_lock_init(&dd->queue_lock);
2017 mutex_init(&dd->core_lock);
2018 INIT_LIST_HEAD(&dd->queue);
2019 INIT_WORK(&dd->work_data, msm_spi_workq);
2020 init_waitqueue_head(&dd->continue_suspend);
2021 dd->workqueue = create_singlethread_workqueue(
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002022 dev_name(master->dev.parent));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002023 if (!dd->workqueue)
2024 goto err_probe_workq;
2025
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002026 if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
2027 dd->mem_size, SPI_DRV_NAME)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002028 rc = -ENXIO;
2029 goto err_probe_reqmem;
2030 }
2031
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002032 dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
2033 if (!dd->base) {
2034 rc = -ENOMEM;
2035 goto err_probe_reqmem;
2036 }
2037
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002038 if (pdata && pdata->rsl_id) {
2039 struct remote_mutex_id rmid;
2040 rmid.r_spinlock_id = pdata->rsl_id;
2041 rmid.delay_us = SPI_TRYLOCK_DELAY;
2042
2043 rc = remote_mutex_init(&dd->r_lock, &rmid);
2044 if (rc) {
2045 dev_err(&pdev->dev, "%s: unable to init remote_mutex "
2046				"(%s), (rc=%d)\n", __func__,
2047				rmid.r_spinlock_id, rc);
2048 goto err_probe_rlock_init;
2049 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002050
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002051 dd->use_rlock = 1;
2052 dd->pm_lat = pdata->pm_lat;
Alok Chauhanb5f53792012-08-22 19:54:45 +05302053 pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
2054 PM_QOS_DEFAULT_VALUE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002055 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002056
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002057 mutex_lock(&dd->core_lock);
2058 if (dd->use_rlock)
2059 remote_mutex_lock(&dd->r_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002060
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002061 locked = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002062 dd->dev = &pdev->dev;
Matt Wagantallac294852011-08-17 15:44:58 -07002063 dd->clk = clk_get(&pdev->dev, "core_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002064 if (IS_ERR(dd->clk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002065 dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002066 rc = PTR_ERR(dd->clk);
2067 goto err_probe_clk_get;
2068 }
2069
Matt Wagantallac294852011-08-17 15:44:58 -07002070 dd->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002071 if (IS_ERR(dd->pclk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002072 dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002073 rc = PTR_ERR(dd->pclk);
2074 goto err_probe_pclk_get;
2075 }
2076
2077 if (pdata && pdata->max_clock_speed)
2078 msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
2079
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002080 rc = clk_prepare_enable(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002081 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002082 dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002083 __func__);
2084 goto err_probe_clk_enable;
2085 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002086
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002087 clk_enabled = 1;
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002088 rc = clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002089 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002090 dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002091 __func__);
2092 goto err_probe_pclk_enable;
2093 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002094
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002095 pclk_enabled = 1;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002096 rc = msm_spi_configure_gsbi(dd, pdev);
2097 if (rc)
2098 goto err_probe_gsbi;
2099
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002100 msm_spi_calculate_fifo_size(dd);
2101 if (dd->use_dma) {
2102 rc = msm_spi_init_dma(dd);
2103 if (rc)
2104 goto err_probe_dma;
2105 }
2106
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002107 msm_spi_register_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002108 /*
2109 * The SPI core generates a bogus input overrun error on some targets,
2110 * when a transition from run to reset state occurs and if the FIFO has
2111 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
2112 * bit.
2113 */
2114 msm_spi_enable_error_flags(dd);
2115
2116 writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
2117 rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
2118 if (rc)
2119 goto err_probe_state;
2120
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002121 clk_disable_unprepare(dd->clk);
2122 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002123 clk_enabled = 0;
2124 pclk_enabled = 0;
2125
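	/*
	 * Start out marked as suspended with clocks off; the first transfer
	 * (via the runtime PM resume path) is expected to bring the clocks
	 * and GPIOs back up.
	 */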
Alok Chauhan2a647c92013-03-12 18:34:43 +05302126 dd->suspended = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002127 dd->transfer_pending = 0;
2128 dd->multi_xfr = 0;
2129 dd->mode = SPI_MODE_NONE;
2130
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002131 rc = msm_spi_request_irq(dd, pdev, master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002132 if (rc)
2133 goto err_probe_irq;
2134
2135 msm_spi_disable_irqs(dd);
2136 if (dd->use_rlock)
2137 remote_mutex_unlock(&dd->r_lock);
2138
2139 mutex_unlock(&dd->core_lock);
2140 locked = 0;
2141
Alok Chauhan2a647c92013-03-12 18:34:43 +05302142 pm_runtime_set_autosuspend_delay(&pdev->dev, MSEC_PER_SEC);
2143 pm_runtime_use_autosuspend(&pdev->dev);
2144 pm_runtime_enable(&pdev->dev);
2145
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002146 rc = spi_register_master(master);
2147 if (rc)
2148 goto err_probe_reg_master;
2149
2150 rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
2151 if (rc) {
2152 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
2153 goto err_attrs;
2154 }
2155
2156 spi_debugfs_init(dd);
Kiran Gunda2b285652012-07-30 13:22:39 +05302157
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002158 return 0;
2159
2160err_attrs:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002161 spi_unregister_master(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002162err_probe_reg_master:
Alok Chauhan2a647c92013-03-12 18:34:43 +05302163 pm_runtime_disable(&pdev->dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002164err_probe_irq:
2165err_probe_state:
2166 msm_spi_teardown_dma(dd);
2167err_probe_dma:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002168err_probe_gsbi:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002169 if (pclk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002170 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002171err_probe_pclk_enable:
2172 if (clk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002173 clk_disable_unprepare(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002174err_probe_clk_enable:
2175 clk_put(dd->pclk);
2176err_probe_pclk_get:
2177 clk_put(dd->clk);
2178err_probe_clk_get:
2179 if (locked) {
2180 if (dd->use_rlock)
2181 remote_mutex_unlock(&dd->r_lock);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002182
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002183 mutex_unlock(&dd->core_lock);
2184 }
2185err_probe_rlock_init:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002186err_probe_reqmem:
2187 destroy_workqueue(dd->workqueue);
2188err_probe_workq:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002189err_probe_res:
2190 spi_master_put(master);
2191err_probe_exit:
2192 return rc;
2193}
2194
2195#ifdef CONFIG_PM
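/*
 * Runtime PM: the device autosuspends one second after the last transfer
 * (see the pm_runtime_set_autosuspend_delay() call in probe).  Runtime
 * suspend waits for pending transfers, disables the QUP interrupts and
 * clocks and releases the SPI GPIOs; runtime resume reverses this and
 * restores the pm_qos latency request.
 */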
Alok Chauhan2a647c92013-03-12 18:34:43 +05302196static int msm_spi_pm_suspend_runtime(struct device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002197{
Alok Chauhan2a647c92013-03-12 18:34:43 +05302198 struct platform_device *pdev = to_platform_device(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002199 struct spi_master *master = platform_get_drvdata(pdev);
Alok Chauhan2a647c92013-03-12 18:34:43 +05302200 struct msm_spi *dd;
2201 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002202
Alok Chauhan2a647c92013-03-12 18:34:43 +05302203 dev_dbg(device, "pm_runtime: suspending...\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002204 if (!master)
2205 goto suspend_exit;
2206 dd = spi_master_get_devdata(master);
2207 if (!dd)
2208 goto suspend_exit;
2209
Alok Chauhan2a647c92013-03-12 18:34:43 +05302210 if (dd->suspended)
2211 return 0;
2212
2213 /*
2214 * Make sure nothing is added to the queue while we're
2215 * suspending
2216 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002217 spin_lock_irqsave(&dd->queue_lock, flags);
2218 dd->suspended = 1;
2219 spin_unlock_irqrestore(&dd->queue_lock, flags);
2220
2221 /* Wait for transactions to end, or time out */
Alok Chauhan2a647c92013-03-12 18:34:43 +05302222 wait_event_interruptible(dd->continue_suspend,
2223 !dd->transfer_pending);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002224
Alok Chauhan2a647c92013-03-12 18:34:43 +05302225 msm_spi_disable_irqs(dd);
2226 clk_disable_unprepare(dd->clk);
2227 clk_disable_unprepare(dd->pclk);
2228
2229 /* Free the spi clk, miso, mosi, cs gpio */
2230 if (dd->pdata && dd->pdata->gpio_release)
2231 dd->pdata->gpio_release();
2232
2233 msm_spi_free_gpios(dd);
2234
2235 if (pm_qos_request_active(&qos_req_list))
2236 pm_qos_update_request(&qos_req_list,
2237 PM_QOS_DEFAULT_VALUE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002238suspend_exit:
2239 return 0;
2240}
2241
Alok Chauhan2a647c92013-03-12 18:34:43 +05302242static int msm_spi_pm_resume_runtime(struct device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002243{
Alok Chauhan2a647c92013-03-12 18:34:43 +05302244 struct platform_device *pdev = to_platform_device(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002245 struct spi_master *master = platform_get_drvdata(pdev);
Alok Chauhan2a647c92013-03-12 18:34:43 +05302246 struct msm_spi *dd;
2247 int ret = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002248
Alok Chauhan2a647c92013-03-12 18:34:43 +05302249 dev_dbg(device, "pm_runtime: resuming...\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002250 if (!master)
2251 goto resume_exit;
2252 dd = spi_master_get_devdata(master);
2253 if (!dd)
2254 goto resume_exit;
2255
Alok Chauhan2a647c92013-03-12 18:34:43 +05302256 if (!dd->suspended)
2257 return 0;
2258
2259 if (pm_qos_request_active(&qos_req_list))
2260 pm_qos_update_request(&qos_req_list,
2261 dd->pm_lat);
2262
2263 /* Configure the spi clk, miso, mosi and cs gpio */
2264	if (dd->pdata && dd->pdata->gpio_config) {
2265 ret = dd->pdata->gpio_config();
2266 if (ret) {
2267 dev_err(dd->dev,
2268 "%s: error configuring GPIOs\n",
2269 __func__);
2270 return ret;
2271 }
2272 }
2273
2274 ret = msm_spi_request_gpios(dd);
2275 if (ret)
2276 return ret;
2277
2278 clk_prepare_enable(dd->clk);
2279 clk_prepare_enable(dd->pclk);
2280 msm_spi_enable_irqs(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002281 dd->suspended = 0;
2282resume_exit:
2283 return 0;
2284}
Alok Chauhan2a647c92013-03-12 18:34:43 +05302285
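/*
 * System sleep: if runtime PM is disabled or the device is still active,
 * force the runtime-suspend path so that clocks and GPIOs are released
 * before the system goes down; resume is left to runtime PM or to the
 * first transfer after wakeup.
 */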
2286static int msm_spi_suspend(struct device *device)
2287{
2288 if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) {
2289 struct platform_device *pdev = to_platform_device(device);
2290 struct spi_master *master = platform_get_drvdata(pdev);
2291 struct msm_spi *dd;
2292
2293 dev_dbg(device, "system suspend");
2294 if (!master)
2295 goto suspend_exit;
2296 dd = spi_master_get_devdata(master);
2297 if (!dd)
2298 goto suspend_exit;
2299 msm_spi_pm_suspend_runtime(device);
2300 }
2301suspend_exit:
2302 return 0;
2303}
2304
2305static int msm_spi_resume(struct device *device)
2306{
2307 /*
2308 * Rely on runtime-PM to call resume in case it is enabled
2309 * Even if it's not enabled, rely on 1st client transaction to do
2310 * clock ON and gpio configuration
2311 */
2312 dev_dbg(device, "system resume");
2313 return 0;
2314}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002315#else
2316#define msm_spi_suspend NULL
2317#define msm_spi_resume NULL
Alok Chauhan2a647c92013-03-12 18:34:43 +05302318#define msm_spi_pm_suspend_runtime NULL
2319#define msm_spi_pm_resume_runtime NULL
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002320#endif /* CONFIG_PM */
2321
2322static int __devexit msm_spi_remove(struct platform_device *pdev)
2323{
2324 struct spi_master *master = platform_get_drvdata(pdev);
2325 struct msm_spi *dd = spi_master_get_devdata(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002326
2327 pm_qos_remove_request(&qos_req_list);
2328 spi_debugfs_exit(dd);
2329 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
2330
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002331 msm_spi_teardown_dma(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002332
Alok Chauhan2a647c92013-03-12 18:34:43 +05302333 pm_runtime_disable(&pdev->dev);
2334 pm_runtime_set_suspended(&pdev->dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002335 clk_put(dd->clk);
2336 clk_put(dd->pclk);
2337 destroy_workqueue(dd->workqueue);
2338	platform_set_drvdata(pdev, NULL);
2339 spi_unregister_master(master);
2340 spi_master_put(master);
2341
2342 return 0;
2343}
2344
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002345static struct of_device_id msm_spi_dt_match[] = {
2346 {
2347 .compatible = "qcom,spi-qup-v2",
2348 },
2349 {}
2350};
2351
Alok Chauhan2a647c92013-03-12 18:34:43 +05302352static const struct dev_pm_ops msm_spi_dev_pm_ops = {
2353 SET_SYSTEM_SLEEP_PM_OPS(msm_spi_suspend, msm_spi_resume)
2354 SET_RUNTIME_PM_OPS(msm_spi_pm_suspend_runtime,
2355 msm_spi_pm_resume_runtime, NULL)
2356};
2357
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002358static struct platform_driver msm_spi_driver = {
2359 .driver = {
2360 .name = SPI_DRV_NAME,
2361 .owner = THIS_MODULE,
Alok Chauhan2a647c92013-03-12 18:34:43 +05302362 .pm = &msm_spi_dev_pm_ops,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002363 .of_match_table = msm_spi_dt_match,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002364 },
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002365 .remove = __exit_p(msm_spi_remove),
2366};
2367
2368static int __init msm_spi_init(void)
2369{
2370 return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
2371}
2372module_init(msm_spi_init);
2373
2374static void __exit msm_spi_exit(void)
2375{
2376 platform_driver_unregister(&msm_spi_driver);
2377}
2378module_exit(msm_spi_exit);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002379
2380MODULE_LICENSE("GPL v2");
2381MODULE_VERSION("0.4");
2382MODULE_ALIAS("platform:"SPI_DRV_NAME);