/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
/*
 * SPI driver for Qualcomm MSM platforms
 *
 */
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <mach/msm_spi.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <mach/dma.h>
#include <asm/atomic.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
#include <linux/remote_spinlock.h>
#include <linux/pm_qos.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include "spi_qsd.h"

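/*
 * On GSBI-based targets the QUP sits behind a GSBI wrapper that must be
 * switched to SPI protocol before the controller can be used. The GSBI
 * register block is an optional second MEM resource; when it is absent
 * this function is a no-op and the mux is assumed to be configured
 * elsewhere.
 */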
static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
					struct platform_device *pdev)
{
	struct resource *resource;
	unsigned long gsbi_mem_phys_addr;
	size_t gsbi_mem_size;
	void __iomem *gsbi_base;

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!resource)
		return 0;

	gsbi_mem_phys_addr = resource->start;
	gsbi_mem_size = resource_size(resource);
	if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size, SPI_DRV_NAME))
		return -ENXIO;

	gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size);
	if (!gsbi_base)
		return -ENXIO;

	/* Set GSBI to SPI mode */
	writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);

	return 0;
}

static inline void msm_spi_register_init(struct msm_spi *dd)
{
	writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
	writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
	writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
	if (dd->qup_ver)
		writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
}

static inline int msm_spi_request_gpios(struct msm_spi *dd)
{
	int i;
	int result = 0;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0) {
			result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
			if (result) {
				dev_err(dd->dev, "%s: gpio_request for pin %d "
					"failed with error %d\n", __func__,
					dd->spi_gpios[i], result);
				goto error;
			}
		}
	}
	return 0;

error:
	for (; --i >= 0;) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}
	return result;
}

static inline void msm_spi_free_gpios(struct msm_spi *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}

	for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
		if (dd->cs_gpios[i].valid) {
			gpio_free(dd->cs_gpios[i].gpio_num);
			dd->cs_gpios[i].valid = 0;
		}
	}
}

static void msm_spi_clock_set(struct msm_spi *dd, int speed)
{
	int rc;

	rc = clk_set_rate(dd->clk, speed);
	if (!rc)
		dd->clock_speed = speed;
}

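/*
 * Decode the FIFO geometry fields read from SPI_IO_MODES: the block field
 * selects the block size in words (1, 4 or 8 words, i.e. 4, 16 or 32 bytes)
 * and the mult field selects how many blocks deep the FIFO is (2, 4, 8 or
 * 16), yielding fifo_size in words and block_size in bytes.
 */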
static int msm_spi_calculate_size(int *fifo_size,
				  int *block_size,
				  int block,
				  int mult)
{
	int words;

	switch (block) {
	case 0:
		words = 1; /* 4 bytes */
		break;
	case 1:
		words = 4; /* 16 bytes */
		break;
	case 2:
		words = 8; /* 32 bytes */
		break;
	default:
		return -EINVAL;
	}

	switch (mult) {
	case 0:
		*fifo_size = words * 2;
		break;
	case 1:
		*fifo_size = words * 4;
		break;
	case 2:
		*fifo_size = words * 8;
		break;
	case 3:
		*fifo_size = words * 16;
		break;
	default:
		return -EINVAL;
	}

	*block_size = words * sizeof(u32); /* in bytes */
	return 0;
}

static void get_next_transfer(struct msm_spi *dd)
{
	struct spi_transfer *t = dd->cur_transfer;

	if (t->transfer_list.next != &dd->cur_msg->transfers) {
		dd->cur_transfer = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
		dd->write_buf = dd->cur_transfer->tx_buf;
		dd->read_buf = dd->cur_transfer->rx_buf;
	}
}

static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
{
	u32 spi_iom;
	int block;
	int mult;

	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);

	block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
					block, mult)) {
		goto fifo_size_err;
	}

	block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->output_fifo_size,
					&dd->output_block_size, block, mult)) {
		goto fifo_size_err;
	}
	/* DM mode is not available for this block size */
	if (dd->input_block_size == 4 || dd->output_block_size == 4)
		dd->use_dma = 0;

	if (dd->use_dma) {
		dd->input_burst_size = max(dd->input_block_size,
					DM_BURST_SIZE);
		dd->output_burst_size = max(dd->output_block_size,
					DM_BURST_SIZE);
	}
	return;

fifo_size_err:
	dd->use_dma = 0;
	pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
	return;
}

static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
{
	u32 data_in;
	int i;
	int shift;

	data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
	if (dd->read_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			     dd->rx_bytes_remaining; i++) {
			/* The data format depends on bytes_per_word:
			   4 bytes: 0x12345678
			   3 bytes: 0x00123456
			   2 bytes: 0x00001234
			   1 byte : 0x00000012
			*/
			shift = 8 * (dd->bytes_per_word - i - 1);
			*dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
			dd->rx_bytes_remaining--;
		}
	} else {
		if (dd->rx_bytes_remaining >= dd->bytes_per_word)
			dd->rx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->rx_bytes_remaining = 0;
	}

	dd->read_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->rx_bytes_remaining)
			dd->read_xfr_cnt = 0;
		else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
						dd->read_len) {
			struct spi_transfer *t = dd->cur_rx_transfer;
			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->read_buf = t->rx_buf;
				dd->read_len = t->len;
				dd->read_xfr_cnt = 0;
				dd->cur_rx_transfer = t;
			}
		}
	}
}

static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
{
	u32 spi_op = readl_relaxed(dd->base + SPI_STATE);

	return spi_op & SPI_OP_STATE_VALID;
}

static inline void msm_spi_udelay(unsigned long delay_usecs)
{
	/*
	 * For smaller values of delay, context switch time
	 * would negate the usage of usleep
	 */
	if (delay_usecs > 20)
		usleep_range(delay_usecs, delay_usecs);
	else if (delay_usecs)
		udelay(delay_usecs);
}

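/*
 * Poll SPI_STATE until the VALID bit is set. The per-iteration delay is
 * derived from the clock speed: e.g. at 1 MHz, delay = 10 * USEC_PER_SEC /
 * 1000000 = 10 us; anything below SPI_DELAY_THRESHOLD is clamped up, and
 * the overall timeout is delay * SPI_DEFAULT_TIMEOUT, taken as milliseconds.
 */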
static inline int msm_spi_wait_valid(struct msm_spi *dd)
{
	unsigned long delay = 0;
	unsigned long timeout = 0;

	if (dd->clock_speed == 0)
		return -EINVAL;
	/*
	 * Based on the SPI clock speed, sufficient time
	 * should be given for the SPI state transition
	 * to occur
	 */
	delay = (10 * USEC_PER_SEC) / dd->clock_speed;
	/*
	 * For small delay values, the default timeout would
	 * be one jiffy
	 */
	if (delay < SPI_DELAY_THRESHOLD)
		delay = SPI_DELAY_THRESHOLD;

	/* Adding one to round off to the nearest jiffy */
	timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
	while (!msm_spi_is_valid_state(dd)) {
		if (time_after(jiffies, timeout)) {
			if (!msm_spi_is_valid_state(dd)) {
				if (dd->cur_msg)
					dd->cur_msg->status = -EIO;
				dev_err(dd->dev, "%s: SPI operational state "
					"not valid\n", __func__);
				return -ETIMEDOUT;
			} else
				return 0;
		}
		msm_spi_udelay(delay);
	}
	return 0;
}

static inline int msm_spi_set_state(struct msm_spi *dd,
				    enum msm_spi_state state)
{
	enum msm_spi_state cur_state;
	if (msm_spi_wait_valid(dd))
		return -EIO;
	cur_state = readl_relaxed(dd->base + SPI_STATE);
	/* Per spec:
	   For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
	if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
			(state == SPI_OP_STATE_RESET)) {
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
	} else {
		writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
				dd->base + SPI_STATE);
	}
	if (msm_spi_wait_valid(dd))
		return -EIO;

	return 0;
}

static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n)
{
	*config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);

	if (n != (*config & SPI_CFG_N))
		*config = (*config & ~SPI_CFG_N) | n;

	if ((dd->mode == SPI_DMOV_MODE) && (!dd->read_len)) {
		if (dd->read_buf == NULL)
			*config |= SPI_NO_INPUT;
		if (dd->write_buf == NULL)
			*config |= SPI_NO_OUTPUT;
	}
}

static void msm_spi_set_config(struct msm_spi *dd, int bpw)
{
	u32 spi_config;

	spi_config = readl_relaxed(dd->base + SPI_CONFIG);

	if (dd->cur_msg->spi->mode & SPI_CPHA)
		spi_config &= ~SPI_CFG_INPUT_FIRST;
	else
		spi_config |= SPI_CFG_INPUT_FIRST;
	if (dd->cur_msg->spi->mode & SPI_LOOP)
		spi_config |= SPI_CFG_LOOPBACK;
	else
		spi_config &= ~SPI_CFG_LOOPBACK;
	msm_spi_add_configs(dd, &spi_config, bpw-1);
	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
	msm_spi_set_qup_config(dd, bpw);
}

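/*
 * Build the Data Mover descriptors for the next chunk: the burst-aligned
 * part of the transfer goes through the 'box' command (num_rows bursts of
 * input/output_burst_size bytes each), while any unaligned tail is bounced
 * through the tx/rx padding buffers via the 'single_pad' command.
 */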
static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
{
	dmov_box *box;
	int bytes_to_send, bytes_sent;
	int tx_num_rows, rx_num_rows;
	u32 num_transfers;

	atomic_set(&dd->rx_irq_called, 0);
	atomic_set(&dd->tx_irq_called, 0);
	if (dd->write_len && !dd->read_len) {
		/* WR-WR transfer */
		bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
		dd->write_buf = dd->temp_buf;
	} else {
		bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
		/* For WR-RD transfer, bytes_sent can be negative */
		if (bytes_sent < 0)
			bytes_sent = 0;
	}
	/* We'll send in chunks of SPI_MAX_LEN if larger than
	 * 4K bytes for targets that have only 12 bits in the
	 * QUP_MAX_OUTPUT_CNT register. If the target supports
	 * more than 12 bits, then we send the data in chunks of
	 * the infinite_mode value that is defined in the
	 * corresponding board file.
	 */
	if (!dd->pdata->infinite_mode)
		dd->max_trfr_len = SPI_MAX_LEN;
	else
		dd->max_trfr_len = (dd->pdata->infinite_mode) *
					(dd->bytes_per_word);

	bytes_to_send = min_t(u32, dd->tx_bytes_remaining,
				dd->max_trfr_len);

	num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
	dd->tx_unaligned_len = bytes_to_send % dd->output_burst_size;
	dd->rx_unaligned_len = bytes_to_send % dd->input_burst_size;
	tx_num_rows = bytes_to_send / dd->output_burst_size;
	rx_num_rows = bytes_to_send / dd->input_burst_size;

	dd->mode = SPI_DMOV_MODE;

	if (tx_num_rows) {
		/* src in 16 MSB, dst in 16 LSB */
		box = &dd->tx_dmov_cmd->box;
		box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
		box->src_dst_len
			= (dd->output_burst_size << 16) | dd->output_burst_size;
		box->num_rows = (tx_num_rows << 16) | tx_num_rows;
		box->row_offset = (dd->output_burst_size << 16) | 0;

		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, box));
	} else {
		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, single_pad));
	}

	if (rx_num_rows) {
		/* src in 16 MSB, dst in 16 LSB */
		box = &dd->rx_dmov_cmd->box;
		box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
		box->src_dst_len
			= (dd->input_burst_size << 16) | dd->input_burst_size;
		box->num_rows = (rx_num_rows << 16) | rx_num_rows;
		box->row_offset = (0 << 16) | dd->input_burst_size;

		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, box));
	} else {
		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, single_pad));
	}

	if (!dd->tx_unaligned_len) {
		dd->tx_dmov_cmd->box.cmd |= CMD_LC;
	} else {
		dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
		u32 tx_offset = dd->cur_transfer->len - dd->tx_unaligned_len;

		if ((dd->multi_xfr) && (dd->read_len <= 0))
			tx_offset = dd->cur_msg_len - dd->tx_unaligned_len;

		dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;

		memset(dd->tx_padding, 0, dd->output_burst_size);
		if (dd->write_buf)
			memcpy(dd->tx_padding, dd->write_buf + tx_offset,
			       dd->tx_unaligned_len);

		tx_cmd->src = dd->tx_padding_dma;
		tx_cmd->len = dd->output_burst_size;
	}

	if (!dd->rx_unaligned_len) {
		dd->rx_dmov_cmd->box.cmd |= CMD_LC;
	} else {
		dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
		dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;

		memset(dd->rx_padding, 0, dd->input_burst_size);
		rx_cmd->dst = dd->rx_padding_dma;
		rx_cmd->len = dd->input_burst_size;
	}

	/* This also takes care of the padding dummy buf
	   Since this is set to the correct length, the
	   dummy bytes won't be actually sent */
	if (dd->multi_xfr) {
		u32 write_transfers = 0;
		u32 read_transfers = 0;

		if (dd->write_len > 0) {
			write_transfers = DIV_ROUND_UP(dd->write_len,
						       dd->bytes_per_word);
			writel_relaxed(write_transfers,
				       dd->base + SPI_MX_OUTPUT_COUNT);
		}
		if (dd->read_len > 0) {
			/*
			 * The read following a write transfer must take
			 * into account that the bytes pertaining to
			 * the write transfer need to be discarded
			 * before the actual read begins.
			 */
			read_transfers = DIV_ROUND_UP(dd->read_len +
						      dd->write_len,
						      dd->bytes_per_word);
			writel_relaxed(read_transfers,
				       dd->base + SPI_MX_INPUT_COUNT);
		}
	} else {
		if (dd->write_buf)
			writel_relaxed(num_transfers,
				       dd->base + SPI_MX_OUTPUT_COUNT);
		if (dd->read_buf)
			writel_relaxed(num_transfers,
				       dd->base + SPI_MX_INPUT_COUNT);
	}
}

static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
{
	dma_coherent_pre_ops();
	if (dd->write_buf)
		msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
	if (dd->read_buf)
		msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
}

/* The SPI core on targets that do not support infinite mode can send a
   maximum of 4K or 64K transfers, depending on the size of the
   MAX_OUTPUT_COUNT register; therefore, we send the data in several
   chunks. Upon completion we send the next chunk, or complete the
   transfer if everything is finished. On targets that support
   infinite mode, we send all the bytes in a single chunk.
*/
static int msm_spi_dm_send_next(struct msm_spi *dd)
{
	/* By now we should have sent all the bytes in FIFO mode;
	 * however, to make things right, we'll check anyway.
	 */
	if (dd->mode != SPI_DMOV_MODE)
		return 0;

	/* On targets which do not support infinite mode,
	   we need to send more chunks if we sent the maximum last time */
	if (dd->tx_bytes_remaining > dd->max_trfr_len) {
		dd->tx_bytes_remaining -= dd->max_trfr_len;
		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
			return 0;
		dd->read_len = dd->write_len = 0;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	} else if (dd->read_len && dd->write_len) {
		dd->tx_bytes_remaining -= dd->cur_transfer->len;
		if (list_is_last(&dd->cur_transfer->transfer_list,
					&dd->cur_msg->transfers))
			return 0;
		get_next_transfer(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
			return 0;
		dd->tx_bytes_remaining = dd->read_len + dd->write_len;
		dd->read_buf = dd->temp_buf;
		dd->read_len = dd->write_len = -1;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	}
	return 0;
}

static inline void msm_spi_ack_transfer(struct msm_spi *dd)
{
	writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
		       SPI_OP_MAX_OUTPUT_DONE_FLAG,
		       dd->base + SPI_OPERATIONAL);
	/* Ensure done flag was cleared before proceeding further */
	mb();
}

/* Figure out which IRQ occurred and call the relevant functions */
static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
{
	u32 op, ret = IRQ_NONE;
	struct msm_spi *dd = dev_id;

	if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
	    readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
		struct spi_master *master = dev_get_drvdata(dd->dev);
		ret |= msm_spi_error_irq(irq, master);
	}

	op = readl_relaxed(dd->base + SPI_OPERATIONAL);
	if (op & SPI_OP_INPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_input_irq(irq, dev_id);
	}

	if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_output_irq(irq, dev_id);
	}

	if (dd->done) {
		complete(&dd->transfer_complete);
		dd->done = 0;
	}
	return ret;
}

static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_rx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
		if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
		    (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
			msm_spi_ack_transfer(dd);
			if (dd->rx_unaligned_len == 0) {
				if (atomic_inc_return(&dd->rx_irq_called) == 1)
					return IRQ_HANDLED;
			}
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	if (dd->mode == SPI_FIFO_MODE) {
		while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
			SPI_OP_IP_FIFO_NOT_EMPTY) &&
			(dd->rx_bytes_remaining > 0)) {
			msm_spi_read_word_from_fifo(dd);
		}
		if (dd->rx_bytes_remaining == 0)
			msm_spi_complete(dd);
	}

	return IRQ_HANDLED;
}

static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
{
	u32 word;
	u8 byte;
	int i;

	word = 0;
	if (dd->write_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			     dd->tx_bytes_remaining; i++) {
			dd->tx_bytes_remaining--;
			byte = *dd->write_buf++;
			word |= (byte << (BITS_PER_BYTE * (3 - i)));
		}
	} else
		if (dd->tx_bytes_remaining > dd->bytes_per_word)
			dd->tx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->tx_bytes_remaining = 0;
	dd->write_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->tx_bytes_remaining)
			dd->write_xfr_cnt = 0;
		else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
						dd->write_len) {
			struct spi_transfer *t = dd->cur_tx_transfer;
			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->write_buf = t->tx_buf;
				dd->write_len = t->len;
				dd->write_xfr_cnt = 0;
				dd->cur_tx_transfer = t;
			}
		}
	}
	writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
}

static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
{
	int count = 0;

	while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
	       !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
		SPI_OP_OUTPUT_FIFO_FULL)) {
		msm_spi_write_word_to_fifo(dd);
		count++;
	}
}

static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_tx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		/* TX_ONLY transaction is handled here
		   This is the only place we send complete at tx and not rx */
		if (dd->read_buf == NULL &&
		    readl_relaxed(dd->base + SPI_OPERATIONAL) &
		    SPI_OP_MAX_OUTPUT_DONE_FLAG) {
			msm_spi_ack_transfer(dd);
			if (atomic_inc_return(&dd->tx_irq_called) == 1)
				return IRQ_HANDLED;
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	/* Output FIFO is empty. Transmit any outstanding write data. */
	if (dd->mode == SPI_FIFO_MODE)
		msm_spi_write_rmn_to_fifo(dd);

	return IRQ_HANDLED;
}

static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct msm_spi *dd = spi_master_get_devdata(master);
	u32 spi_err;

	spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
	if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output overrun error\n");
	if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI input underrun error\n");
	if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output underrun error\n");
	msm_spi_get_clk_err(dd, &spi_err);
	if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock overrun error\n");
	if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock underrun error\n");
	msm_spi_clear_error_flags(dd);
	msm_spi_ack_clk_err(dd);
	/* Ensure clearing of QUP_ERROR_FLAGS was completed */
	mb();
	return IRQ_HANDLED;
}

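/*
 * Map the client buffers for Data Mover use. For WR-WR and WR-RD message
 * pairs a single DMA-safe bounce buffer (temp_buf) spanning the whole
 * message is allocated: for WR-WR both tx buffers are copied into it before
 * mapping, while for WR-RD the read data lands in it and is copied back out
 * in msm_spi_unmap_dma_buffers().
 */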
static int msm_spi_map_dma_buffers(struct msm_spi *dd)
{
	struct device *dev;
	struct spi_transfer *first_xfr;
	struct spi_transfer *nxt_xfr = NULL;
	void *tx_buf, *rx_buf;
	unsigned tx_len, rx_len;
	int ret = -EINVAL;

	dev = &dd->cur_msg->spi->dev;
	first_xfr = dd->cur_transfer;
	tx_buf = (void *)first_xfr->tx_buf;
	rx_buf = first_xfr->rx_buf;
	tx_len = rx_len = first_xfr->len;

	/*
	 * For WR-WR and WR-RD transfers, we allocate our own temporary
	 * buffer and copy the data to/from the client buffers.
	 */
	if (dd->multi_xfr) {
		dd->temp_buf = kzalloc(dd->cur_msg_len,
				       GFP_KERNEL | __GFP_DMA);
		if (!dd->temp_buf)
			return -ENOMEM;
		nxt_xfr = list_entry(first_xfr->transfer_list.next,
				     struct spi_transfer, transfer_list);

		if (dd->write_len && !dd->read_len) {
			if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
				goto error;

			memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
			memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
			       nxt_xfr->len);
			tx_buf = dd->temp_buf;
			tx_len = dd->cur_msg_len;
		} else {
			if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
				goto error;

			rx_buf = dd->temp_buf;
			rx_len = dd->cur_msg_len;
		}
	}
	if (tx_buf != NULL) {
		first_xfr->tx_dma = dma_map_single(dev, tx_buf,
						   tx_len, DMA_TO_DEVICE);
		if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'T', tx_len);
			ret = -ENOMEM;
			goto error;
		}
	}
	if (rx_buf != NULL) {
		dma_addr_t dma_handle;
		dma_handle = dma_map_single(dev, rx_buf,
					    rx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(NULL, dma_handle)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'R', rx_len);
			if (tx_buf != NULL)
				dma_unmap_single(NULL, first_xfr->tx_dma,
						 tx_len, DMA_TO_DEVICE);
			ret = -ENOMEM;
			goto error;
		}
		if (dd->multi_xfr)
			nxt_xfr->rx_dma = dma_handle;
		else
			first_xfr->rx_dma = dma_handle;
	}
	return 0;

error:
	kfree(dd->temp_buf);
	dd->temp_buf = NULL;
	return ret;
}

static void msm_spi_unmap_dma_buffers(struct msm_spi *dd)
{
	struct device *dev;
	u32 offset;

	dev = &dd->cur_msg->spi->dev;
	if (dd->cur_msg->is_dma_mapped)
		goto unmap_end;

	if (dd->multi_xfr) {
		if (dd->write_len && !dd->read_len) {
			dma_unmap_single(dev,
					 dd->cur_transfer->tx_dma,
					 dd->cur_msg_len,
					 DMA_TO_DEVICE);
		} else {
			struct spi_transfer *prev_xfr;
			prev_xfr = list_entry(
				   dd->cur_transfer->transfer_list.prev,
				   struct spi_transfer,
				   transfer_list);
			if (dd->cur_transfer->rx_buf) {
				dma_unmap_single(dev,
						 dd->cur_transfer->rx_dma,
						 dd->cur_msg_len,
						 DMA_FROM_DEVICE);
			}
			if (prev_xfr->tx_buf) {
				dma_unmap_single(dev,
						 prev_xfr->tx_dma,
						 prev_xfr->len,
						 DMA_TO_DEVICE);
			}
			if (dd->rx_unaligned_len && dd->read_buf) {
				offset = dd->cur_msg_len - dd->rx_unaligned_len;
				dma_coherent_post_ops();
				memcpy(dd->read_buf + offset, dd->rx_padding,
				       dd->rx_unaligned_len);
				memcpy(dd->cur_transfer->rx_buf,
				       dd->read_buf + prev_xfr->len,
				       dd->cur_transfer->len);
			}
		}
		kfree(dd->temp_buf);
		dd->temp_buf = NULL;
		return;
	} else {
		if (dd->cur_transfer->rx_buf)
			dma_unmap_single(dev, dd->cur_transfer->rx_dma,
					 dd->cur_transfer->len,
					 DMA_FROM_DEVICE);
		if (dd->cur_transfer->tx_buf)
			dma_unmap_single(dev, dd->cur_transfer->tx_dma,
					 dd->cur_transfer->len,
					 DMA_TO_DEVICE);
	}

unmap_end:
	/* If we padded the transfer, we copy it from the padding buf */
	if (dd->rx_unaligned_len && dd->read_buf) {
		offset = dd->cur_transfer->len - dd->rx_unaligned_len;
		dma_coherent_post_ops();
		memcpy(dd->read_buf + offset, dd->rx_padding,
		       dd->rx_unaligned_len);
	}
}

/**
 * msm_use_dm - decides whether to use data mover for this
 * transfer
 * @dd:       device
 * @tr:       transfer
 *
 * Start using DM if:
 * 1. Transfer is longer than 3*block size.
 * 2. Buffers should be aligned to cache line.
 * 3. For WR-RD or WR-WR transfers, if condition (1) and (2) above are met.
 */
static inline int msm_use_dm(struct msm_spi *dd, struct spi_transfer *tr,
			     u8 bpw)
{
	u32 cache_line = dma_get_cache_alignment();

	if (!dd->use_dma)
		return 0;

	if (dd->cur_msg_len < 3*dd->input_block_size)
		return 0;

	if (dd->multi_xfr && !dd->read_len && !dd->write_len)
		return 0;

	if (tr->tx_buf) {
		if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
			return 0;
	}
	if (tr->rx_buf) {
		if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
			return 0;
	}

	/* Fall back to FIFO mode for cs_change transfers whose word size
	   is not byte-packed (8/16/32 bits). */
	if (tr->cs_change &&
	   ((bpw != 8) && (bpw != 16) && (bpw != 32)))
		return 0;
	return 1;
}

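/*
 * Run one (possibly combined) transfer: pick FIFO or Data Mover mode via
 * msm_use_dm(), program the IO modes, clock polarity and chip select, kick
 * off the transfer and wait for completion. The timeout is scaled from the
 * transfer length and clock speed (100x the ideal wire time).
 */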
static void msm_spi_process_transfer(struct msm_spi *dd)
{
	u8 bpw;
	u32 spi_ioc;
	u32 spi_iom;
	u32 spi_ioc_orig;
	u32 max_speed;
	u32 chip_select;
	u32 read_count;
	u32 timeout;
	u32 int_loopback = 0;

	dd->tx_bytes_remaining = dd->cur_msg_len;
	dd->rx_bytes_remaining = dd->cur_msg_len;
	dd->read_buf = dd->cur_transfer->rx_buf;
	dd->write_buf = dd->cur_transfer->tx_buf;
	init_completion(&dd->transfer_complete);
	if (dd->cur_transfer->bits_per_word)
		bpw = dd->cur_transfer->bits_per_word;
	else
		if (dd->cur_msg->spi->bits_per_word)
			bpw = dd->cur_msg->spi->bits_per_word;
		else
			bpw = 8;
	dd->bytes_per_word = (bpw + 7) / 8;

	if (dd->cur_transfer->speed_hz)
		max_speed = dd->cur_transfer->speed_hz;
	else
		max_speed = dd->cur_msg->spi->max_speed_hz;
	if (!dd->clock_speed || max_speed != dd->clock_speed)
		msm_spi_clock_set(dd, max_speed);

	read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
	if (dd->cur_msg->spi->mode & SPI_LOOP)
		int_loopback = 1;
	if (int_loopback && dd->multi_xfr &&
			(read_count > dd->input_fifo_size)) {
		if (dd->read_len && dd->write_len)
			pr_err(
			"%s: Internal loopback does not support > fifo size "
			"for write-then-read transactions\n",
			__func__);
		else if (dd->write_len && !dd->read_len)
			pr_err(
			"%s: Internal loopback does not support > fifo size "
			"for write-then-write transactions\n",
			__func__);
		return;
	}
	if (!msm_use_dm(dd, dd->cur_transfer, bpw)) {
		dd->mode = SPI_FIFO_MODE;
		if (dd->multi_xfr) {
			dd->read_len = dd->cur_transfer->len;
			dd->write_len = dd->cur_transfer->len;
		}
		/* read_count cannot exceed fifo_size, and only one READ COUNT
		   interrupt is generated per transaction, so for transactions
		   larger than fifo size READ COUNT must be disabled.
		   For those transactions we usually move to Data Mover mode.
		*/
		if (read_count <= dd->input_fifo_size) {
			writel_relaxed(read_count,
				       dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, read_count);
		} else {
			writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, 0);
		}
	} else {
		dd->mode = SPI_DMOV_MODE;
		if (dd->write_len && dd->read_len) {
			dd->tx_bytes_remaining = dd->write_len;
			dd->rx_bytes_remaining = dd->read_len;
		}
	}

	/* Write mode - fifo or data mover */
	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
	spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
	spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
	spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
	/* Turn on packing for data mover */
	if (dd->mode == SPI_DMOV_MODE)
		spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
	else
		spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
	writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);

	msm_spi_set_config(dd, bpw);

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (dd->cur_msg->spi->mode & SPI_CPOL)
		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
	chip_select = dd->cur_msg->spi->chip_select << 2;
	if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
		spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
	if (!dd->cur_transfer->cs_change)
		spi_ioc |= SPI_IO_C_MX_CS_MODE;
	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);

	if (dd->mode == SPI_DMOV_MODE) {
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
	}
	/* The output fifo interrupt handler will handle all writes after
	   the first. Restricting this to one write avoids contention
	   issues and race conditions between this thread and the int handler
	*/
	else if (dd->mode == SPI_FIFO_MODE) {
		if (msm_spi_prepare_for_write(dd))
			goto transfer_end;
		msm_spi_start_write(dd, read_count);
	}

	/* Only enter the RUN state after the first word is written into
	   the output FIFO. Otherwise, the output FIFO EMPTY interrupt
	   might fire before the first word is written, resulting in a
	   possible race condition.
	*/
	if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
		goto transfer_end;

	timeout = 100 * msecs_to_jiffies(
			DIV_ROUND_UP(dd->cur_msg_len * 8,
			DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));

	/* Assume success, this might change later upon transaction result */
	dd->cur_msg->status = 0;
	do {
		if (!wait_for_completion_timeout(&dd->transfer_complete,
						 timeout)) {
			dev_err(dd->dev, "%s: SPI transaction "
					 "timeout\n", __func__);
			dd->cur_msg->status = -EIO;
			if (dd->mode == SPI_DMOV_MODE) {
				msm_dmov_flush(dd->tx_dma_chan, 1);
				msm_dmov_flush(dd->rx_dma_chan, 1);
			}
			break;
		}
	} while (msm_spi_dm_send_next(dd));

	msm_spi_udelay(dd->cur_transfer->delay_usecs);
transfer_end:
	if (dd->mode == SPI_DMOV_MODE)
		msm_spi_unmap_dma_buffers(dd);
	dd->mode = SPI_MODE_NONE;

	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
		       dd->base + SPI_IO_CONTROL);
}

static void get_transfer_length(struct msm_spi *dd)
{
	struct spi_transfer *tr;
	int num_xfrs = 0;
	int readlen = 0;
	int writelen = 0;

	dd->cur_msg_len = 0;
	dd->multi_xfr = 0;
	dd->read_len = dd->write_len = 0;

	list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
		if (tr->tx_buf)
			writelen += tr->len;
		if (tr->rx_buf)
			readlen += tr->len;
		dd->cur_msg_len += tr->len;
		num_xfrs++;
	}

	if (num_xfrs == 2) {
		struct spi_transfer *first_xfr = dd->cur_transfer;

		dd->multi_xfr = 1;
		tr = list_entry(first_xfr->transfer_list.next,
				struct spi_transfer,
				transfer_list);
		/*
		 * We update dd->read_len and dd->write_len only
		 * for WR-WR and WR-RD transfers.
		 */
		if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
			if (((tr->tx_buf) && (!tr->rx_buf)) ||
			    ((!tr->tx_buf) && (tr->rx_buf))) {
				dd->read_len = readlen;
				dd->write_len = writelen;
			}
		}
	} else if (num_xfrs > 1)
		dd->multi_xfr = 1;
}

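/*
 * Group consecutive transfers that share the same cs_change setting into
 * one logical transfer; returns how many spi_transfer entries were merged
 * and leaves their combined length in dd->cur_msg_len.
 */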
static inline int combine_transfers(struct msm_spi *dd)
{
	struct spi_transfer *t = dd->cur_transfer;
	struct spi_transfer *nxt;
	int xfrs_grped = 1;

	dd->cur_msg_len = dd->cur_transfer->len;
	while (t->transfer_list.next != &dd->cur_msg->transfers) {
		nxt = list_entry(t->transfer_list.next,
				 struct spi_transfer,
				 transfer_list);
		if (t->cs_change != nxt->cs_change)
			return xfrs_grped;
		dd->cur_msg_len += nxt->len;
		xfrs_grped++;
		t = nxt;
	}
	return xfrs_grped;
}

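/*
 * On QUP v2 the FORCE_CS bit is used to keep the chip select asserted
 * across grouped transfers; it is only written when the value actually
 * changes, to avoid a redundant register access.
 */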
static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
{
	u32 spi_ioc;
	u32 spi_ioc_orig;

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (set_flag)
		spi_ioc |= SPI_IO_C_FORCE_CS;
	else
		spi_ioc &= ~SPI_IO_C_FORCE_CS;

	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
}

static void msm_spi_process_message(struct msm_spi *dd)
{
	int xfrs_grped = 0;
	int cs_num;
	int rc;
	bool xfer_delay = false;
	struct spi_transfer *tr;

	dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
	cs_num = dd->cur_msg->spi->chip_select;
	if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
		(!(dd->cs_gpios[cs_num].valid)) &&
		(dd->cs_gpios[cs_num].gpio_num >= 0)) {
		rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
				spi_cs_rsrcs[cs_num]);
		if (rc) {
			dev_err(dd->dev, "gpio_request for pin %d failed with "
				"error %d\n", dd->cs_gpios[cs_num].gpio_num,
				rc);
			return;
		}
		dd->cs_gpios[cs_num].valid = 1;
	}

	list_for_each_entry(tr,
				&dd->cur_msg->transfers,
				transfer_list) {
		if (tr->delay_usecs) {
			dev_info(dd->dev, "SPI slave requests delay per txn: %d",
					tr->delay_usecs);
			xfer_delay = true;
			break;
		}
	}

	/* Don't combine xfers if delay is needed after every xfer */
	if (dd->qup_ver || xfer_delay) {
		if (dd->qup_ver)
			write_force_cs(dd, 0);
		list_for_each_entry(dd->cur_transfer,
					&dd->cur_msg->transfers,
					transfer_list) {
			struct spi_transfer *t = dd->cur_transfer;
			struct spi_transfer *nxt;

			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				nxt = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);

				if (dd->qup_ver &&
					t->cs_change == nxt->cs_change)
					write_force_cs(dd, 1);
				else if (dd->qup_ver)
					write_force_cs(dd, 0);
			}

			dd->cur_msg_len = dd->cur_transfer->len;
			msm_spi_process_transfer(dd);
		}
	} else {
		dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
							struct spi_transfer,
							transfer_list);
		get_transfer_length(dd);
		if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
			/*
			 * Handling of multi-transfers.
			 * FIFO mode is used by default
			 */
			list_for_each_entry(dd->cur_transfer,
						&dd->cur_msg->transfers,
						transfer_list) {
				if (!dd->cur_transfer->len)
					goto error;
				if (xfrs_grped) {
					xfrs_grped--;
					continue;
				} else {
					dd->read_len = dd->write_len = 0;
					xfrs_grped = combine_transfers(dd);
				}

				dd->cur_tx_transfer = dd->cur_transfer;
				dd->cur_rx_transfer = dd->cur_transfer;
				msm_spi_process_transfer(dd);
				xfrs_grped--;
			}
		} else {
			/* Handling of a single transfer or
			 * WR-WR or WR-RD transfers
			 */
			if ((!dd->cur_msg->is_dma_mapped) &&
				(msm_use_dm(dd, dd->cur_transfer,
					dd->cur_transfer->bits_per_word))) {
				/* Mapping of DMA buffers */
				int ret = msm_spi_map_dma_buffers(dd);
				if (ret < 0) {
					dd->cur_msg->status = ret;
					goto error;
				}
			}

			dd->cur_tx_transfer = dd->cur_transfer;
			dd->cur_rx_transfer = dd->cur_transfer;
			msm_spi_process_transfer(dd);
		}
	}

	return;

error:
	if (dd->cs_gpios[cs_num].valid) {
		gpio_free(dd->cs_gpios[cs_num].gpio_num);
		dd->cs_gpios[cs_num].valid = 0;
	}
}

/* workqueue - pull messages from queue & process */
static void msm_spi_workq(struct work_struct *work)
{
	struct msm_spi *dd =
		container_of(work, struct msm_spi, work_data);
	unsigned long flags;
	u32 status_error = 0;
	int rc = 0;

	mutex_lock(&dd->core_lock);

	/* Don't allow power collapse until we release mutex */
	if (pm_qos_request_active(&qos_req_list))
		pm_qos_update_request(&qos_req_list,
				      dd->pm_lat);
	if (dd->use_rlock)
		remote_mutex_lock(&dd->r_lock);

	/* Configure the spi clk, miso, mosi and cs gpio */
	if (dd->pdata->gpio_config) {
		rc = dd->pdata->gpio_config();
		if (rc) {
			dev_err(dd->dev,
					"%s: error configuring GPIOs\n",
					__func__);
			status_error = 1;
		}
	}

	rc = msm_spi_request_gpios(dd);
	if (rc)
		status_error = 1;

	clk_prepare_enable(dd->clk);
	clk_prepare_enable(dd->pclk);
	msm_spi_enable_irqs(dd);

	if (!msm_spi_is_valid_state(dd)) {
		dev_err(dd->dev, "%s: SPI operational state not valid\n",
			__func__);
		status_error = 1;
	}

	spin_lock_irqsave(&dd->queue_lock, flags);
	while (!list_empty(&dd->queue)) {
		dd->cur_msg = list_entry(dd->queue.next,
					 struct spi_message, queue);
		list_del_init(&dd->cur_msg->queue);
		spin_unlock_irqrestore(&dd->queue_lock, flags);
		if (status_error)
			dd->cur_msg->status = -EIO;
		else
			msm_spi_process_message(dd);
		if (dd->cur_msg->complete)
			dd->cur_msg->complete(dd->cur_msg->context);
		spin_lock_irqsave(&dd->queue_lock, flags);
	}
	dd->transfer_pending = 0;
	spin_unlock_irqrestore(&dd->queue_lock, flags);

	msm_spi_disable_irqs(dd);
	clk_disable_unprepare(dd->clk);
	clk_disable_unprepare(dd->pclk);

	/* Free the spi clk, miso, mosi, cs gpio */
	if (!rc && dd->pdata && dd->pdata->gpio_release)
		dd->pdata->gpio_release();
	if (!rc)
		msm_spi_free_gpios(dd);

	if (dd->use_rlock)
		remote_mutex_unlock(&dd->r_lock);

	if (pm_qos_request_active(&qos_req_list))
		pm_qos_update_request(&qos_req_list,
				      PM_QOS_DEFAULT_VALUE);

	mutex_unlock(&dd->core_lock);
	/* If needed, this can be done after the current message is complete,
	   and work can be continued upon resume. No motivation for now. */
	if (dd->suspended)
		wake_up_interruptible(&dd->continue_suspend);
}

static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct msm_spi *dd;
	unsigned long flags;
	struct spi_transfer *tr;

	dd = spi_master_get_devdata(spi->master);
	if (dd->suspended)
		return -EBUSY;

	if (list_empty(&msg->transfers) || !msg->complete)
		return -EINVAL;

	list_for_each_entry(tr, &msg->transfers, transfer_list) {
		/* Check message parameters */
		if (tr->speed_hz > dd->pdata->max_clock_speed ||
		    (tr->bits_per_word &&
		     (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
		    (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
			dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw, "
					   "tx=%p, rx=%p\n",
					   tr->speed_hz, tr->bits_per_word,
					   tr->tx_buf, tr->rx_buf);
			return -EINVAL;
		}
	}

	spin_lock_irqsave(&dd->queue_lock, flags);
	if (dd->suspended) {
		spin_unlock_irqrestore(&dd->queue_lock, flags);
		return -EBUSY;
	}
	dd->transfer_pending = 1;
	list_add_tail(&msg->queue, &dd->queue);
	spin_unlock_irqrestore(&dd->queue_lock, flags);
	queue_work(dd->workqueue, &dd->work_data);
	return 0;
}

static int msm_spi_setup(struct spi_device *spi)
{
	struct msm_spi *dd;
	int rc = 0;
	u32 spi_ioc;
	u32 spi_config;
	u32 mask;

	if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
		dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
			__func__, spi->bits_per_word);
		rc = -EINVAL;
	}
	if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
		dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
			__func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
		rc = -EINVAL;
	}

	if (rc)
		goto err_setup_exit;

	dd = spi_master_get_devdata(spi->master);

	mutex_lock(&dd->core_lock);
	if (dd->suspended) {
		mutex_unlock(&dd->core_lock);
		return -EBUSY;
	}

	if (dd->use_rlock)
		remote_mutex_lock(&dd->r_lock);

	/* Configure the spi clk, miso, mosi, cs gpio */
	if (dd->pdata->gpio_config) {
		rc = dd->pdata->gpio_config();
		if (rc) {
			dev_err(&spi->dev,
					"%s: error configuring GPIOs\n",
					__func__);
			rc = -ENXIO;
			goto err_setup_gpio;
		}
	}

	rc = msm_spi_request_gpios(dd);
	if (rc) {
		rc = -ENXIO;
		goto err_setup_gpio;
	}

	clk_prepare_enable(dd->clk);
	clk_prepare_enable(dd->pclk);

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
	if (spi->mode & SPI_CS_HIGH)
		spi_ioc |= mask;
	else
		spi_ioc &= ~mask;
	if (spi->mode & SPI_CPOL)
		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;

	writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);

	spi_config = readl_relaxed(dd->base + SPI_CONFIG);
	if (spi->mode & SPI_LOOP)
		spi_config |= SPI_CFG_LOOPBACK;
	else
		spi_config &= ~SPI_CFG_LOOPBACK;
	if (spi->mode & SPI_CPHA)
		spi_config &= ~SPI_CFG_INPUT_FIRST;
	else
		spi_config |= SPI_CFG_INPUT_FIRST;
	writel_relaxed(spi_config, dd->base + SPI_CONFIG);

	/* Ensure previous write completed before disabling the clocks */
	mb();
	clk_disable_unprepare(dd->clk);
	clk_disable_unprepare(dd->pclk);

	/* Free the spi clk, miso, mosi, cs gpio */
	if (dd->pdata && dd->pdata->gpio_release)
		dd->pdata->gpio_release();
	msm_spi_free_gpios(dd);

err_setup_gpio:
	if (dd->use_rlock)
		remote_mutex_unlock(&dd->r_lock);
	mutex_unlock(&dd->core_lock);
err_setup_exit:
	return rc;
}

#ifdef CONFIG_DEBUG_FS
static int debugfs_iomem_x32_set(void *data, u64 val)
{
	writel_relaxed(val, data);
	/* Ensure the previous write completed. */
	mb();
	return 0;
}

static int debugfs_iomem_x32_get(void *data, u64 *val)
{
	*val = readl_relaxed(data);
	/* Ensure the previous read completed. */
	mb();
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
			debugfs_iomem_x32_set, "0x%08llx\n");

static void spi_debugfs_init(struct msm_spi *dd)
{
	dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
	if (dd->dent_spi) {
		int i;

		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
			dd->debugfs_spi_regs[i] =
			   debugfs_create_file(
			       debugfs_spi_regs[i].name,
			       debugfs_spi_regs[i].mode,
			       dd->dent_spi,
			       dd->base + debugfs_spi_regs[i].offset,
			       &fops_iomem_x32);
		}
	}
}

static void spi_debugfs_exit(struct msm_spi *dd)
{
	if (dd->dent_spi) {
		int i;

		debugfs_remove_recursive(dd->dent_spi);
		dd->dent_spi = NULL;
		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
			dd->debugfs_spi_regs[i] = NULL;
	}
}
#else
static void spi_debugfs_init(struct msm_spi *dd) {}
static void spi_debugfs_exit(struct msm_spi *dd) {}
#endif

/* ===Device attributes begin=== */
static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct msm_spi *dd = spi_master_get_devdata(master);

	return snprintf(buf, PAGE_SIZE,
			"Device %s\n"
			"rx fifo_size = %d spi words\n"
			"tx fifo_size = %d spi words\n"
			"use_dma ? %s\n"
			"rx block size = %d bytes\n"
			"tx block size = %d bytes\n"
			"input burst size = %d bytes\n"
			"output burst size = %d bytes\n"
			"DMA configuration:\n"
			"tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
			"--statistics--\n"
			"Rx isrs = %d\n"
			"Tx isrs = %d\n"
			"DMA error = %d\n"
			"--debug--\n"
			"NA yet\n",
			dev_name(dev),
			dd->input_fifo_size,
			dd->output_fifo_size,
			dd->use_dma ? "yes" : "no",
			dd->input_block_size,
			dd->output_block_size,
			dd->input_burst_size,
			dd->output_burst_size,
			dd->tx_dma_chan,
			dd->rx_dma_chan,
			dd->tx_dma_crci,
			dd->rx_dma_crci,
			dd->stat_rx + dd->stat_dmov_rx,
			dd->stat_tx + dd->stat_dmov_tx,
			dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
			);
}

/* Reset statistics on write */
static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct msm_spi *dd = dev_get_drvdata(dev);
	dd->stat_rx = 0;
	dd->stat_tx = 0;
	dd->stat_dmov_rx = 0;
	dd->stat_dmov_tx = 0;
	dd->stat_dmov_rx_err = 0;
	dd->stat_dmov_tx_err = 0;
	return count;
}

static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);

static struct attribute *dev_attrs[] = {
	&dev_attr_stats.attr,
	NULL,
};

static struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
/* ===Device attributes end=== */

1673/**
1674 * spi_dmov_tx_complete_func - DataMover tx completion callback
1675 *
1676 * Executed in IRQ context (Data Mover's IRQ) DataMover's
1677 * spinlock @msm_dmov_lock held.
1678 */
1679static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
1680 unsigned int result,
1681 struct msm_dmov_errdata *err)
1682{
1683 struct msm_spi *dd;
1684
1685 if (!(result & DMOV_RSLT_VALID)) {
1686 pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
1687 return;
1688 }
1689 /* restore original context */
1690 dd = container_of(cmd, struct msm_spi, tx_hdr);
Kiran Gunda54eb06e2012-05-18 15:17:06 +05301691 if (result & DMOV_RSLT_DONE) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001692 dd->stat_dmov_tx++;
Kiran Gunda54eb06e2012-05-18 15:17:06 +05301693 if ((atomic_inc_return(&dd->tx_irq_called) == 1))
1694 return;
1695 complete(&dd->transfer_complete);
1696 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001697 /* Error or flush */
1698 if (result & DMOV_RSLT_ERROR) {
1699 dev_err(dd->dev, "DMA error (0x%08x)\n", result);
1700 dd->stat_dmov_tx_err++;
1701 }
1702 if (result & DMOV_RSLT_FLUSH) {
1703 /*
1704 * Flushing normally happens in process of
1705 * removing, when we are waiting for outstanding
1706 * DMA commands to be flushed.
1707 */
1708 dev_info(dd->dev,
1709 "DMA channel flushed (0x%08x)\n", result);
1710 }
1711 if (err)
1712 dev_err(dd->dev,
1713 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
1714 err->flush[0], err->flush[1], err->flush[2],
1715 err->flush[3], err->flush[4], err->flush[5]);
1716 dd->cur_msg->status = -EIO;
1717 complete(&dd->transfer_complete);
1718 }
1719}
1720
1721/**
1722 * spi_dmov_rx_complete_func - DataMover rx completion callback
1723 *
 * Executed in IRQ context (the Data Mover's IRQ), with the Data Mover's
 * spinlock @msm_dmov_lock held.
1726 */
1727static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
1728 unsigned int result,
1729 struct msm_dmov_errdata *err)
1730{
1731 struct msm_spi *dd;
1732
1733 if (!(result & DMOV_RSLT_VALID)) {
		pr_err("Invalid DMOV result (rc=0x%08x, cmd=%p)\n",
			result, cmd);
1736 return;
1737 }
1738 /* restore original context */
1739 dd = container_of(cmd, struct msm_spi, rx_hdr);
1740 if (result & DMOV_RSLT_DONE) {
1741 dd->stat_dmov_rx++;
1742 if (atomic_inc_return(&dd->rx_irq_called) == 1)
1743 return;
1744 complete(&dd->transfer_complete);
1745 } else {
		/* Error or flush */
1747 if (result & DMOV_RSLT_ERROR) {
1748 dev_err(dd->dev, "DMA error(0x%08x)\n", result);
1749 dd->stat_dmov_rx_err++;
1750 }
1751 if (result & DMOV_RSLT_FLUSH) {
1752 dev_info(dd->dev,
1753 "DMA channel flushed(0x%08x)\n", result);
1754 }
1755 if (err)
1756 dev_err(dd->dev,
1757 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
1758 err->flush[0], err->flush[1], err->flush[2],
1759 err->flush[3], err->flush[4], err->flush[5]);
1760 dd->cur_msg->status = -EIO;
1761 complete(&dd->transfer_complete);
1762 }
1763}
1764
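/*
 * Size of the single coherent allocation used for DMA bookkeeping: two
 * spi_dmov_cmd structures (tx and rx), each rounded up to DM_BYTE_ALIGN,
 * plus two padding buffers of the larger burst size, each rounded up to
 * a cache line (see the layout set up in msm_spi_init_dma()).
 */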
Alok Chauhanc27843e2013-02-15 16:04:20 +05301765static inline u32 get_chunk_size(struct msm_spi *dd, int input_burst_size,
1766 int output_burst_size)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001767{
1768 u32 cache_line = dma_get_cache_alignment();
Alok Chauhanc27843e2013-02-15 16:04:20 +05301769 int burst_size = (input_burst_size > output_burst_size) ?
1770 input_burst_size : output_burst_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001771
1772 return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
Alok Chauhanc27843e2013-02-15 16:04:20 +05301773 roundup(burst_size, cache_line))*2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001774}
1775
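/*
 * Undo msm_spi_init_dma(): flush both Data Mover channels until the core
 * leaves SPI_DMOV_MODE (polling for up to ~500 ms), then free the single
 * coherent chunk holding both command structures and both padding buffers.
 */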
1776static void msm_spi_teardown_dma(struct msm_spi *dd)
1777{
1778 int limit = 0;
1779
1780 if (!dd->use_dma)
1781 return;
1782
1783 while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001784 msm_dmov_flush(dd->tx_dma_chan, 1);
1785 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001786 msleep(10);
1787 }
1788
Alok Chauhanc27843e2013-02-15 16:04:20 +05301789 dma_free_coherent(NULL,
1790 get_chunk_size(dd, dd->input_burst_size, dd->output_burst_size),
1791 dd->tx_dmov_cmd,
1792 dd->tx_dmov_cmd_dma);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001793 dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
1794 dd->tx_padding = dd->rx_padding = NULL;
1795}
1796
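/*
 * Set up Data Mover support.  One coherent allocation holds, in order:
 *
 *	tx_dmov_cmd | rx_dmov_cmd | tx_padding | rx_padding
 *
 * with the command structures aligned to DM_BYTE_ALIGN and the padding
 * buffers (one burst each) aligned to the cache line.  The box commands
 * move data between memory and the QUP input/output FIFOs, gated by the
 * rx/tx CRCIs; the single_pad entries are single-mode commands aimed at
 * the same FIFOs, used together with the padding buffers for data that
 * does not fill a complete burst.
 */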
1797static __init int msm_spi_init_dma(struct msm_spi *dd)
1798{
1799 dmov_box *box;
1800 u32 cache_line = dma_get_cache_alignment();
1801
	/*
	 * Allocate everything as one chunk, since the total is smaller than
	 * a page.
	 *
	 * We pass a NULL device since dma_alloc_coherent() requires
	 * coherent_dma_mask to be set in the device definition; we are okay
	 * with using the system pool.
	 */
Alok Chauhanc27843e2013-02-15 16:04:20 +05301806 dd->tx_dmov_cmd
1807 = dma_alloc_coherent(NULL,
1808 get_chunk_size(dd, dd->input_burst_size,
1809 dd->output_burst_size),
1810 &dd->tx_dmov_cmd_dma, GFP_KERNEL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001811 if (dd->tx_dmov_cmd == NULL)
1812 return -ENOMEM;
1813
	/* DMA addresses should be 64-bit aligned */
1815 dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
1816 ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
1817 dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
1818 sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);
1819
1820 /* Buffers should be aligned to cache line */
1821 dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
1822 dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
1823 sizeof(struct spi_dmov_cmd), cache_line);
Alok Chauhanc27843e2013-02-15 16:04:20 +05301824 dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding +
1825 dd->output_burst_size), cache_line);
1826 dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->output_burst_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001827 cache_line);
1828
1829 /* Setup DM commands */
1830 box = &(dd->rx_dmov_cmd->box);
1831 box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
1832 box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
1833 dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
1834 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
1835 offsetof(struct spi_dmov_cmd, cmd_ptr));
1836 dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001837
1838 box = &(dd->tx_dmov_cmd->box);
1839 box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
1840 box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
1841 dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
1842 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
1843 offsetof(struct spi_dmov_cmd, cmd_ptr));
1844 dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001845
1846 dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
1847 CMD_DST_CRCI(dd->tx_dma_crci);
1848 dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
1849 SPI_OUTPUT_FIFO;
1850 dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
1851 CMD_SRC_CRCI(dd->rx_dma_crci);
1852 dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
1853 SPI_INPUT_FIFO;
1854
1855 /* Clear remaining activities on channel */
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001856 msm_dmov_flush(dd->tx_dma_chan, 1);
1857 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001858
1859 return 0;
1860}
1861
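/*
 * Build platform data from the device tree.  Only two properties are read
 * here: "spi-max-frequency" and "infinite_mode"; "cell-index" and the GPIO
 * properties are handled directly in msm_spi_probe().
 *
 * Illustrative node (the unit address and property values are assumptions,
 * not requirements of this parser):
 *
 *	spi@f9923000 {
 *		compatible = "qcom,spi-qup-v2";
 *		cell-index = <0>;
 *		spi-max-frequency = <25000000>;
 *	};
 */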
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001862struct msm_spi_platform_data *msm_spi_dt_to_pdata(struct platform_device *pdev)
1863{
1864 struct device_node *node = pdev->dev.of_node;
1865 struct msm_spi_platform_data *pdata;
1866
1867 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1868 if (!pdata) {
1869 pr_err("Unable to allocate platform data\n");
1870 return NULL;
1871 }
1872
1873 of_property_read_u32(node, "spi-max-frequency",
1874 &pdata->max_clock_speed);
Kiran Gundae8f16742012-06-27 10:06:32 +05301875 of_property_read_u32(node, "infinite_mode",
1876 &pdata->infinite_mode);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001877
1878 return pdata;
1879}
1880
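/*
 * Controller probe.  In rough order: allocate and fill the spi_master,
 * pick up configuration from the device tree (QUP v2 / B-family targets)
 * or from legacy platform data, map the controller registers, optionally
 * set up Data Mover channels, configure the GSBI and QUP core with the
 * clocks enabled, request the IRQs, and finally register the master and
 * the "stats" sysfs group.
 */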
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001881static int __init msm_spi_probe(struct platform_device *pdev)
1882{
1883 struct spi_master *master;
1884 struct msm_spi *dd;
1885 struct resource *resource;
1886 int rc = -ENXIO;
1887 int locked = 0;
1888 int i = 0;
1889 int clk_enabled = 0;
1890 int pclk_enabled = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001891 struct msm_spi_platform_data *pdata;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001892 enum of_gpio_flags flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001893
1894 master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
1895 if (!master) {
1896 rc = -ENOMEM;
1897 dev_err(&pdev->dev, "master allocation failed\n");
1898 goto err_probe_exit;
1899 }
1900
1901 master->bus_num = pdev->id;
1902 master->mode_bits = SPI_SUPPORTED_MODES;
1903 master->num_chipselect = SPI_NUM_CHIPSELECTS;
1904 master->setup = msm_spi_setup;
1905 master->transfer = msm_spi_transfer;
1906 platform_set_drvdata(pdev, master);
1907 dd = spi_master_get_devdata(master);
1908
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001909 if (pdev->dev.of_node) {
1910 dd->qup_ver = SPI_QUP_VERSION_BFAM;
1911 master->dev.of_node = pdev->dev.of_node;
1912 pdata = msm_spi_dt_to_pdata(pdev);
1913 if (!pdata) {
1914 rc = -ENOMEM;
1915 goto err_probe_exit;
1916 }
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001917
Kenneth Heitkeecc836b2012-08-11 20:53:01 -06001918 rc = of_property_read_u32(pdev->dev.of_node,
1919 "cell-index", &pdev->id);
1920 if (rc)
1921 dev_warn(&pdev->dev,
1922 "using default bus_num %d\n", pdev->id);
1923 else
1924 master->bus_num = pdev->id;
1925
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001926 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
1927 dd->spi_gpios[i] = of_get_gpio_flags(pdev->dev.of_node,
1928 i, &flags);
1929 }
1930
1931 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
1932 dd->cs_gpios[i].gpio_num = of_get_named_gpio_flags(
1933 pdev->dev.of_node, "cs-gpios",
1934 i, &flags);
1935 dd->cs_gpios[i].valid = 0;
1936 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001937 } else {
1938 pdata = pdev->dev.platform_data;
1939 dd->qup_ver = SPI_QUP_VERSION_NONE;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001940
1941 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
1942 resource = platform_get_resource(pdev, IORESOURCE_IO,
1943 i);
1944 dd->spi_gpios[i] = resource ? resource->start : -1;
1945 }
1946
1947 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
1948 resource = platform_get_resource(pdev, IORESOURCE_IO,
1949 i + ARRAY_SIZE(spi_rsrcs));
1950 dd->cs_gpios[i].gpio_num = resource ?
1951 resource->start : -1;
1952 dd->cs_gpios[i].valid = 0;
1953 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001954 }
1955
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001956 dd->pdata = pdata;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001957 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001958 if (!resource) {
1959 rc = -ENXIO;
1960 goto err_probe_res;
1961 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001962
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001963 dd->mem_phys_addr = resource->start;
1964 dd->mem_size = resource_size(resource);
1965
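	/*
	 * Data Mover resources are optional: use_dma is set only when the
	 * platform supplies DMA channel and CRCI resources (and dma_config()
	 * succeeds); otherwise transfers stay on the FIFO path.
	 */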
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001966 if (pdata) {
1967 if (pdata->dma_config) {
1968 rc = pdata->dma_config();
1969 if (rc) {
1970 dev_warn(&pdev->dev,
1971 "%s: DM mode not supported\n",
1972 __func__);
1973 dd->use_dma = 0;
1974 goto skip_dma_resources;
1975 }
1976 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001977 resource = platform_get_resource(pdev, IORESOURCE_DMA, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001978 if (resource) {
1979 dd->rx_dma_chan = resource->start;
1980 dd->tx_dma_chan = resource->end;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001981 resource = platform_get_resource(pdev, IORESOURCE_DMA,
1982 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001983 if (!resource) {
1984 rc = -ENXIO;
1985 goto err_probe_res;
1986 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001987
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001988 dd->rx_dma_crci = resource->start;
1989 dd->tx_dma_crci = resource->end;
1990 dd->use_dma = 1;
1991 master->dma_alignment = dma_get_cache_alignment();
1992 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001993 }
1994
Alok Chauhanb5f53792012-08-22 19:54:45 +05301995skip_dma_resources:
Harini Jayaramane4c06192011-09-28 16:26:39 -06001996
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001997 spin_lock_init(&dd->queue_lock);
1998 mutex_init(&dd->core_lock);
1999 INIT_LIST_HEAD(&dd->queue);
2000 INIT_WORK(&dd->work_data, msm_spi_workq);
2001 init_waitqueue_head(&dd->continue_suspend);
2002 dd->workqueue = create_singlethread_workqueue(
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002003 dev_name(master->dev.parent));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002004 if (!dd->workqueue)
2005 goto err_probe_workq;
2006
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002007 if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
2008 dd->mem_size, SPI_DRV_NAME)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002009 rc = -ENXIO;
2010 goto err_probe_reqmem;
2011 }
2012
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002013 dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
2014 if (!dd->base) {
2015 rc = -ENOMEM;
2016 goto err_probe_reqmem;
2017 }
2018
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002019 if (pdata && pdata->rsl_id) {
2020 struct remote_mutex_id rmid;
2021 rmid.r_spinlock_id = pdata->rsl_id;
2022 rmid.delay_us = SPI_TRYLOCK_DELAY;
2023
2024 rc = remote_mutex_init(&dd->r_lock, &rmid);
2025 if (rc) {
			dev_err(&pdev->dev,
				"%s: unable to init remote_mutex (%s), (rc=%d)\n",
				__func__, rmid.r_spinlock_id, rc);
2029 goto err_probe_rlock_init;
2030 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002031
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002032 dd->use_rlock = 1;
2033 dd->pm_lat = pdata->pm_lat;
Alok Chauhanb5f53792012-08-22 19:54:45 +05302034 pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
2035 PM_QOS_DEFAULT_VALUE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002036 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002037
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002038 mutex_lock(&dd->core_lock);
2039 if (dd->use_rlock)
2040 remote_mutex_lock(&dd->r_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002041
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002042 locked = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002043 dd->dev = &pdev->dev;
Matt Wagantallac294852011-08-17 15:44:58 -07002044 dd->clk = clk_get(&pdev->dev, "core_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002045 if (IS_ERR(dd->clk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002046 dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002047 rc = PTR_ERR(dd->clk);
2048 goto err_probe_clk_get;
2049 }
2050
Matt Wagantallac294852011-08-17 15:44:58 -07002051 dd->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002052 if (IS_ERR(dd->pclk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002053 dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002054 rc = PTR_ERR(dd->pclk);
2055 goto err_probe_pclk_get;
2056 }
2057
2058 if (pdata && pdata->max_clock_speed)
2059 msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
2060
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002061 rc = clk_prepare_enable(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002062 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002063 dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002064 __func__);
2065 goto err_probe_clk_enable;
2066 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002067
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002068 clk_enabled = 1;
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002069 rc = clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002070 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002071 dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002072 __func__);
2073 goto err_probe_pclk_enable;
2074 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002075
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002076 pclk_enabled = 1;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002077 rc = msm_spi_configure_gsbi(dd, pdev);
2078 if (rc)
2079 goto err_probe_gsbi;
2080
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002081 msm_spi_calculate_fifo_size(dd);
2082 if (dd->use_dma) {
2083 rc = msm_spi_init_dma(dd);
2084 if (rc)
2085 goto err_probe_dma;
2086 }
2087
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002088 msm_spi_register_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002089 /*
2090 * The SPI core generates a bogus input overrun error on some targets,
2091 * when a transition from run to reset state occurs and if the FIFO has
2092 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
2093 * bit.
2094 */
2095 msm_spi_enable_error_flags(dd);
2096
2097 writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
2098 rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
2099 if (rc)
2100 goto err_probe_state;
2101
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002102 clk_disable_unprepare(dd->clk);
2103 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002104 clk_enabled = 0;
2105 pclk_enabled = 0;
2106
2107 dd->suspended = 0;
2108 dd->transfer_pending = 0;
2109 dd->multi_xfr = 0;
2110 dd->mode = SPI_MODE_NONE;
2111
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002112 rc = msm_spi_request_irq(dd, pdev, master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002113 if (rc)
2114 goto err_probe_irq;
2115
2116 msm_spi_disable_irqs(dd);
2117 if (dd->use_rlock)
2118 remote_mutex_unlock(&dd->r_lock);
2119
2120 mutex_unlock(&dd->core_lock);
2121 locked = 0;
2122
2123 rc = spi_register_master(master);
2124 if (rc)
2125 goto err_probe_reg_master;
2126
2127 rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
2128 if (rc) {
2129 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
2130 goto err_attrs;
2131 }
2132
2133 spi_debugfs_init(dd);
Kiran Gunda2b285652012-07-30 13:22:39 +05302134
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002135 return 0;
2136
2137err_attrs:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002138 spi_unregister_master(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002139err_probe_reg_master:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002140err_probe_irq:
2141err_probe_state:
2142 msm_spi_teardown_dma(dd);
2143err_probe_dma:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002144err_probe_gsbi:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002145 if (pclk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002146 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002147err_probe_pclk_enable:
2148 if (clk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002149 clk_disable_unprepare(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002150err_probe_clk_enable:
2151 clk_put(dd->pclk);
2152err_probe_pclk_get:
2153 clk_put(dd->clk);
2154err_probe_clk_get:
2155 if (locked) {
2156 if (dd->use_rlock)
2157 remote_mutex_unlock(&dd->r_lock);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002158
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002159 mutex_unlock(&dd->core_lock);
2160 }
2161err_probe_rlock_init:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002162err_probe_reqmem:
2163 destroy_workqueue(dd->workqueue);
2164err_probe_workq:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002165err_probe_res:
2166 spi_master_put(master);
2167err_probe_exit:
2168 return rc;
2169}
2170
2171#ifdef CONFIG_PM
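/*
 * Legacy (platform bus) power management callbacks.  Suspend only marks
 * the controller as suspended and waits for any transfer already queued
 * to drain; resume just clears the flag.
 */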
2172static int msm_spi_suspend(struct platform_device *pdev, pm_message_t state)
2173{
2174 struct spi_master *master = platform_get_drvdata(pdev);
2175 struct msm_spi *dd;
2176 unsigned long flags;
2177
2178 if (!master)
2179 goto suspend_exit;
2180 dd = spi_master_get_devdata(master);
2181 if (!dd)
2182 goto suspend_exit;
2183
2184 /* Make sure nothing is added to the queue while we're suspending */
2185 spin_lock_irqsave(&dd->queue_lock, flags);
2186 dd->suspended = 1;
2187 spin_unlock_irqrestore(&dd->queue_lock, flags);
2188
2189 /* Wait for transactions to end, or time out */
2190 wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002191
2192suspend_exit:
2193 return 0;
2194}
2195
2196static int msm_spi_resume(struct platform_device *pdev)
2197{
2198 struct spi_master *master = platform_get_drvdata(pdev);
2199 struct msm_spi *dd;
2200
2201 if (!master)
2202 goto resume_exit;
2203 dd = spi_master_get_devdata(master);
2204 if (!dd)
2205 goto resume_exit;
2206
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002207 dd->suspended = 0;
2208resume_exit:
2209 return 0;
2210}
2211#else
2212#define msm_spi_suspend NULL
2213#define msm_spi_resume NULL
2214#endif /* CONFIG_PM */
2215
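/*
 * Tear everything down in roughly the reverse order of probe: drop the
 * PM QoS request, remove debugfs entries and the sysfs group, flush and
 * free the DMA resources, release the clocks, destroy the workqueue and
 * unregister the SPI master.
 */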
2216static int __devexit msm_spi_remove(struct platform_device *pdev)
2217{
2218 struct spi_master *master = platform_get_drvdata(pdev);
2219 struct msm_spi *dd = spi_master_get_devdata(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002220
	if (dd->use_rlock)
		pm_qos_remove_request(&qos_req_list);
2222 spi_debugfs_exit(dd);
2223 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
2224
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002225 msm_spi_teardown_dma(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002226
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002227 clk_put(dd->clk);
2228 clk_put(dd->pclk);
2229 destroy_workqueue(dd->workqueue);
	platform_set_drvdata(pdev, NULL);
2231 spi_unregister_master(master);
2232 spi_master_put(master);
2233
2234 return 0;
2235}
2236
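/*
 * Device-tree match table: QUP v2 (B-family) controllers bind through the
 * "qcom,spi-qup-v2" compatible; legacy targets bind by platform-device
 * name (SPI_DRV_NAME) instead.
 */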
static const struct of_device_id msm_spi_dt_match[] = {
2238 {
2239 .compatible = "qcom,spi-qup-v2",
2240 },
2241 {}
2242};
2243
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002244static struct platform_driver msm_spi_driver = {
2245 .driver = {
2246 .name = SPI_DRV_NAME,
2247 .owner = THIS_MODULE,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002248 .of_match_table = msm_spi_dt_match,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002249 },
2250 .suspend = msm_spi_suspend,
2251 .resume = msm_spi_resume,
	.remove		= __devexit_p(msm_spi_remove),
2253};
2254
2255static int __init msm_spi_init(void)
2256{
2257 return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
2258}
2259module_init(msm_spi_init);
2260
2261static void __exit msm_spi_exit(void)
2262{
2263 platform_driver_unregister(&msm_spi_driver);
2264}
2265module_exit(msm_spi_exit);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002266
2267MODULE_LICENSE("GPL v2");
2268MODULE_VERSION("0.4");
2269MODULE_ALIAS("platform:"SPI_DRV_NAME);