/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
/*
 * SPI driver for Qualcomm MSM platforms
 *
 */
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <mach/msm_spi.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <mach/dma.h>
#include <asm/atomic.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
#include <linux/remote_spinlock.h>
#include <linux/pm_qos_params.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include "spi_qsd.h"

static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
					struct platform_device *pdev)
{
	struct resource *resource;
	unsigned long gsbi_mem_phys_addr;
	size_t gsbi_mem_size;
	void __iomem *gsbi_base;

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!resource)
		return 0;

	gsbi_mem_phys_addr = resource->start;
	gsbi_mem_size = resource_size(resource);
	if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size, SPI_DRV_NAME))
		return -ENXIO;

	gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size);
	if (!gsbi_base)
		return -ENXIO;

	/* Set GSBI to SPI mode */
	writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);

	return 0;
}

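/* Reset the QUP core and clear the operational, config and IO-mode
 * registers; on QUP v2 hardware the QUP interrupt mask is cleared as well.
 */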
static inline void msm_spi_register_init(struct msm_spi *dd)
{
	writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
	writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
	writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
	if (dd->qup_ver)
		writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
}

static inline int msm_spi_request_gpios(struct msm_spi *dd)
{
	int i;
	int result = 0;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0) {
			result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
			if (result) {
				dev_err(dd->dev, "%s: gpio_request for pin %d "
					"failed with error %d\n", __func__,
					dd->spi_gpios[i], result);
				goto error;
			}
		}
	}
	return 0;

error:
	for (; --i >= 0;) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}
	return result;
}

static inline void msm_spi_free_gpios(struct msm_spi *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}

	for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
		if (dd->cs_gpios[i].valid) {
			gpio_free(dd->cs_gpios[i].gpio_num);
			dd->cs_gpios[i].valid = 0;
		}
	}
}

static void msm_spi_clock_set(struct msm_spi *dd, int speed)
{
	int rc;

	rc = clk_set_rate(dd->clk, speed);
	if (!rc)
		dd->clock_speed = speed;
}

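/* Translate the block/mult fields read from SPI_IO_MODES into a FIFO size
 * (in words) and a block size (in bytes). Unknown encodings return -EINVAL.
 */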
static int msm_spi_calculate_size(int *fifo_size,
				int *block_size,
				int block,
				int mult)
{
	int words;

	switch (block) {
	case 0:
		words = 1; /* 4 bytes */
		break;
	case 1:
		words = 4; /* 16 bytes */
		break;
	case 2:
		words = 8; /* 32 bytes */
		break;
	default:
		return -EINVAL;
	}

	switch (mult) {
	case 0:
		*fifo_size = words * 2;
		break;
	case 1:
		*fifo_size = words * 4;
		break;
	case 2:
		*fifo_size = words * 8;
		break;
	case 3:
		*fifo_size = words * 16;
		break;
	default:
		return -EINVAL;
	}

	*block_size = words * sizeof(u32); /* in bytes */
	return 0;
}

static void get_next_transfer(struct msm_spi *dd)
{
	struct spi_transfer *t = dd->cur_transfer;

	if (t->transfer_list.next != &dd->cur_msg->transfers) {
		dd->cur_transfer = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
		dd->write_buf = dd->cur_transfer->tx_buf;
		dd->read_buf = dd->cur_transfer->rx_buf;
	}
}

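/* Read SPI_IO_MODES to determine the input/output FIFO and block sizes,
 * and disable DM (data mover) mode for configurations it cannot handle.
 */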
static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
{
	u32 spi_iom;
	int block;
	int mult;

	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);

	block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
					block, mult)) {
		goto fifo_size_err;
	}

	block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->output_fifo_size,
					&dd->output_block_size, block, mult)) {
		goto fifo_size_err;
	}
	/* DM mode is not available for this block size */
	if (dd->input_block_size == 4 || dd->output_block_size == 4)
		dd->use_dma = 0;

	/* DM mode is currently unsupported for different block sizes */
	if (dd->input_block_size != dd->output_block_size)
		dd->use_dma = 0;

	if (dd->use_dma)
		dd->burst_size = max(dd->input_block_size, DM_BURST_SIZE);

	return;

fifo_size_err:
	dd->use_dma = 0;
	pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
	return;
}

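/* Unpack one word from the input FIFO into read_buf (MSB first), or just
 * account for the received bytes when there is no read buffer.
 */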
static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
{
	u32 data_in;
	int i;
	int shift;

	data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
	if (dd->read_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			     dd->rx_bytes_remaining; i++) {
			/* The data format depends on bytes_per_word:
			   4 bytes: 0x12345678
			   3 bytes: 0x00123456
			   2 bytes: 0x00001234
			   1 byte : 0x00000012
			*/
			shift = 8 * (dd->bytes_per_word - i - 1);
			*dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
			dd->rx_bytes_remaining--;
		}
	} else {
		if (dd->rx_bytes_remaining >= dd->bytes_per_word)
			dd->rx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->rx_bytes_remaining = 0;
	}

	dd->read_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->rx_bytes_remaining)
			dd->read_xfr_cnt = 0;
		else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
						dd->read_len) {
			struct spi_transfer *t = dd->cur_rx_transfer;
			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->read_buf = t->rx_buf;
				dd->read_len = t->len;
				dd->read_xfr_cnt = 0;
				dd->cur_rx_transfer = t;
			}
		}
	}
}

static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
{
	u32 spi_op = readl_relaxed(dd->base + SPI_STATE);

	return spi_op & SPI_OP_STATE_VALID;
}

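/* Poll until the core reports a valid operational state, sleeping between
 * reads based on the SPI clock speed, and time out with -ETIMEDOUT.
 */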
static inline int msm_spi_wait_valid(struct msm_spi *dd)
{
	unsigned long delay = 0;
	unsigned long timeout = 0;

	if (dd->clock_speed == 0)
		return -EINVAL;
	/*
	 * Based on the SPI clock speed, sufficient time
	 * should be given for the SPI state transition
	 * to occur
	 */
	delay = (10 * USEC_PER_SEC) / dd->clock_speed;
	/*
	 * For small delay values, the default timeout would
	 * be one jiffy
	 */
	if (delay < SPI_DELAY_THRESHOLD)
		delay = SPI_DELAY_THRESHOLD;

	/* Adding one to round off to the nearest jiffy */
	timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
	while (!msm_spi_is_valid_state(dd)) {
		if (time_after(jiffies, timeout)) {
			if (!msm_spi_is_valid_state(dd)) {
				if (dd->cur_msg)
					dd->cur_msg->status = -EIO;
				dev_err(dd->dev, "%s: SPI operational state "
					"not valid\n", __func__);
				return -ETIMEDOUT;
			} else
				return 0;
		}
		/*
		 * For smaller values of delay, context switch time
		 * would negate the usage of usleep
		 */
		if (delay > 20)
			usleep(delay);
		else if (delay)
			udelay(delay);
	}
	return 0;
}

static inline int msm_spi_set_state(struct msm_spi *dd,
				    enum msm_spi_state state)
{
	enum msm_spi_state cur_state;
	if (msm_spi_wait_valid(dd))
		return -EIO;
	cur_state = readl_relaxed(dd->base + SPI_STATE);
	/* Per spec:
	   For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
	if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
			(state == SPI_OP_STATE_RESET)) {
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
	} else {
		writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
				dd->base + SPI_STATE);
	}
	if (msm_spi_wait_valid(dd))
		return -EIO;

	return 0;
}

static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n)
{
	*config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);

	if (n != (*config & SPI_CFG_N))
		*config = (*config & ~SPI_CFG_N) | n;

	if ((dd->mode == SPI_DMOV_MODE) && (!dd->read_len)) {
		if (dd->read_buf == NULL)
			*config |= SPI_NO_INPUT;
		if (dd->write_buf == NULL)
			*config |= SPI_NO_OUTPUT;
	}
}

static void msm_spi_set_config(struct msm_spi *dd, int bpw)
{
	u32 spi_config;

	spi_config = readl_relaxed(dd->base + SPI_CONFIG);

	if (dd->cur_msg->spi->mode & SPI_CPHA)
		spi_config &= ~SPI_CFG_INPUT_FIRST;
	else
		spi_config |= SPI_CFG_INPUT_FIRST;
	if (dd->cur_msg->spi->mode & SPI_LOOP)
		spi_config |= SPI_CFG_LOOPBACK;
	else
		spi_config &= ~SPI_CFG_LOOPBACK;
	msm_spi_add_configs(dd, &spi_config, bpw-1);
	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
	msm_spi_set_qup_config(dd, bpw);
}

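/* Program the data mover box/single commands and the MX input/output
 * counts for the next chunk of the current transfer.
 */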
static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
{
	dmov_box *box;
	int bytes_to_send, num_rows, bytes_sent;
	u32 num_transfers;

	atomic_set(&dd->rx_irq_called, 0);
	atomic_set(&dd->tx_irq_called, 0);
	if (dd->write_len && !dd->read_len) {
		/* WR-WR transfer */
		bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
		dd->write_buf = dd->temp_buf;
	} else {
		bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
		/* For WR-RD transfer, bytes_sent can be negative */
		if (bytes_sent < 0)
			bytes_sent = 0;
	}

	/* We'll send in chunks of SPI_MAX_LEN if larger */
	bytes_to_send = dd->tx_bytes_remaining / SPI_MAX_LEN ?
			SPI_MAX_LEN : dd->tx_bytes_remaining;
	num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
	dd->unaligned_len = bytes_to_send % dd->burst_size;
	num_rows = bytes_to_send / dd->burst_size;

	dd->mode = SPI_DMOV_MODE;

	if (num_rows) {
		/* src in 16 MSB, dst in 16 LSB */
		box = &dd->tx_dmov_cmd->box;
		box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
		box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
		box->num_rows = (num_rows << 16) | num_rows;
		box->row_offset = (dd->burst_size << 16) | 0;

		box = &dd->rx_dmov_cmd->box;
		box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
		box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
		box->num_rows = (num_rows << 16) | num_rows;
		box->row_offset = (0 << 16) | dd->burst_size;

		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, box));
		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, box));
	} else {
		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, single_pad));
		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, single_pad));
	}

	if (!dd->unaligned_len) {
		dd->tx_dmov_cmd->box.cmd |= CMD_LC;
		dd->rx_dmov_cmd->box.cmd |= CMD_LC;
	} else {
		dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
		dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
		u32 offset = dd->cur_transfer->len - dd->unaligned_len;

		if ((dd->multi_xfr) && (dd->read_len <= 0))
			offset = dd->cur_msg_len - dd->unaligned_len;

		dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
		dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;

		memset(dd->tx_padding, 0, dd->burst_size);
		memset(dd->rx_padding, 0, dd->burst_size);
		if (dd->write_buf)
			memcpy(dd->tx_padding, dd->write_buf + offset,
			       dd->unaligned_len);

		tx_cmd->src = dd->tx_padding_dma;
		rx_cmd->dst = dd->rx_padding_dma;
		tx_cmd->len = rx_cmd->len = dd->burst_size;
	}
	/* This also takes care of the padding dummy buf.
	   Since this is set to the correct length, the
	   dummy bytes won't actually be sent. */
	if (dd->multi_xfr) {
		u32 write_transfers = 0;
		u32 read_transfers = 0;

		if (dd->write_len > 0) {
			write_transfers = DIV_ROUND_UP(dd->write_len,
						       dd->bytes_per_word);
			writel_relaxed(write_transfers,
				       dd->base + SPI_MX_OUTPUT_COUNT);
		}
		if (dd->read_len > 0) {
			/*
			 * The read following a write transfer must take
			 * into account that the bytes pertaining to
			 * the write transfer need to be discarded
			 * before the actual read begins.
			 */
			read_transfers = DIV_ROUND_UP(dd->read_len +
						      dd->write_len,
						      dd->bytes_per_word);
			writel_relaxed(read_transfers,
				       dd->base + SPI_MX_INPUT_COUNT);
		}
	} else {
		if (dd->write_buf)
			writel_relaxed(num_transfers,
				       dd->base + SPI_MX_OUTPUT_COUNT);
		if (dd->read_buf)
			writel_relaxed(num_transfers,
				       dd->base + SPI_MX_INPUT_COUNT);
	}
}

static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
{
	dma_coherent_pre_ops();
	if (dd->write_buf)
		msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
	if (dd->read_buf)
		msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
}

/* The SPI core can send a maximum of 4K per transfer, because there is a HW
   problem with infinite mode.
   Therefore, we are sending several chunks of 3K or less (depending on how
   much is left).
   Upon completion we send the next chunk, or complete the transfer if
   everything is finished.
*/
static int msm_spi_dm_send_next(struct msm_spi *dd)
{
	/* By now we should have sent all the bytes in FIFO mode;
	 * however, to make things right, we'll check anyway.
	 */
	if (dd->mode != SPI_DMOV_MODE)
		return 0;

	/* We need to send more chunks, if we sent max last time */
	if (dd->tx_bytes_remaining > SPI_MAX_LEN) {
		dd->tx_bytes_remaining -= SPI_MAX_LEN;
		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
			return 0;
		dd->read_len = dd->write_len = 0;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	} else if (dd->read_len && dd->write_len) {
		dd->tx_bytes_remaining -= dd->cur_transfer->len;
		if (list_is_last(&dd->cur_transfer->transfer_list,
					    &dd->cur_msg->transfers))
			return 0;
		get_next_transfer(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
			return 0;
		dd->tx_bytes_remaining = dd->read_len + dd->write_len;
		dd->read_buf = dd->temp_buf;
		dd->read_len = dd->write_len = -1;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	}
	return 0;
}

static inline void msm_spi_ack_transfer(struct msm_spi *dd)
{
	writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
		       SPI_OP_MAX_OUTPUT_DONE_FLAG,
		       dd->base + SPI_OPERATIONAL);
	/* Ensure done flag was cleared before proceeding further */
	mb();
}

/* Figure out which IRQ occurred and call the relevant functions */
static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
{
	u32 op, ret = IRQ_NONE;
	struct msm_spi *dd = dev_id;

	if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
	    readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
		struct spi_master *master = dev_get_drvdata(dd->dev);
		ret |= msm_spi_error_irq(irq, master);
	}

	op = readl_relaxed(dd->base + SPI_OPERATIONAL);
	if (op & SPI_OP_INPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_input_irq(irq, dev_id);
	}

	if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_output_irq(irq, dev_id);
	}

	if (dd->done) {
		complete(&dd->transfer_complete);
		dd->done = 0;
	}
	return ret;
}

static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_rx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
		if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
		    (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
			msm_spi_ack_transfer(dd);
			if (dd->unaligned_len == 0) {
				if (atomic_inc_return(&dd->rx_irq_called) == 1)
					return IRQ_HANDLED;
			}
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	if (dd->mode == SPI_FIFO_MODE) {
		while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
			SPI_OP_IP_FIFO_NOT_EMPTY) &&
			(dd->rx_bytes_remaining > 0)) {
			msm_spi_read_word_from_fifo(dd);
		}
		if (dd->rx_bytes_remaining == 0)
			msm_spi_complete(dd);
	}

	return IRQ_HANDLED;
}

static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
{
	u32 word;
	u8 byte;
	int i;

	word = 0;
	if (dd->write_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			     dd->tx_bytes_remaining; i++) {
			dd->tx_bytes_remaining--;
			byte = *dd->write_buf++;
			word |= (byte << (BITS_PER_BYTE * (3 - i)));
		}
	} else
		if (dd->tx_bytes_remaining > dd->bytes_per_word)
			dd->tx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->tx_bytes_remaining = 0;
	dd->write_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->tx_bytes_remaining)
			dd->write_xfr_cnt = 0;
		else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
						dd->write_len) {
			struct spi_transfer *t = dd->cur_tx_transfer;
			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->write_buf = t->tx_buf;
				dd->write_len = t->len;
				dd->write_xfr_cnt = 0;
				dd->cur_tx_transfer = t;
			}
		}
	}
	writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
}

static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
{
	int count = 0;

	while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
	       !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
		SPI_OP_OUTPUT_FIFO_FULL)) {
		msm_spi_write_word_to_fifo(dd);
		count++;
	}
}

static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_tx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		/* TX_ONLY transaction is handled here.
		   This is the only place we send complete at TX and not RX. */
		if (dd->read_buf == NULL &&
		    readl_relaxed(dd->base + SPI_OPERATIONAL) &
		    SPI_OP_MAX_OUTPUT_DONE_FLAG) {
			msm_spi_ack_transfer(dd);
			if (atomic_inc_return(&dd->tx_irq_called) == 1)
				return IRQ_HANDLED;
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	/* Output FIFO is empty. Transmit any outstanding write data. */
	if (dd->mode == SPI_FIFO_MODE)
		msm_spi_write_rmn_to_fifo(dd);

	return IRQ_HANDLED;
}

static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct msm_spi *dd = spi_master_get_devdata(master);
	u32 spi_err;

	spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
	if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output overrun error\n");
	if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI input underrun error\n");
	if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output underrun error\n");
	msm_spi_get_clk_err(dd, &spi_err);
	if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock overrun error\n");
	if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock underrun error\n");
	msm_spi_clear_error_flags(dd);
	msm_spi_ack_clk_err(dd);
	/* Ensure clearing of QUP_ERROR_FLAGS was completed */
	mb();
	return IRQ_HANDLED;
}

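/* DMA-map the TX/RX buffers of the current transfer; for WR-WR and WR-RD
 * messages the data is staged through a temporary bounce buffer.
 */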
static int msm_spi_map_dma_buffers(struct msm_spi *dd)
{
	struct device *dev;
	struct spi_transfer *first_xfr;
	struct spi_transfer *nxt_xfr = NULL;
	void *tx_buf, *rx_buf;
	unsigned tx_len, rx_len;
	int ret = -EINVAL;

	dev = &dd->cur_msg->spi->dev;
	first_xfr = dd->cur_transfer;
	tx_buf = (void *)first_xfr->tx_buf;
	rx_buf = first_xfr->rx_buf;
	tx_len = rx_len = first_xfr->len;

	/*
	 * For WR-WR and WR-RD transfers, we allocate our own temporary
	 * buffer and copy the data to/from the client buffers.
	 */
	if (dd->multi_xfr) {
		dd->temp_buf = kzalloc(dd->cur_msg_len,
				       GFP_KERNEL | __GFP_DMA);
		if (!dd->temp_buf)
			return -ENOMEM;
		nxt_xfr = list_entry(first_xfr->transfer_list.next,
				     struct spi_transfer, transfer_list);

		if (dd->write_len && !dd->read_len) {
			if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
				goto error;

			memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
			memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
			       nxt_xfr->len);
			tx_buf = dd->temp_buf;
			tx_len = dd->cur_msg_len;
		} else {
			if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
				goto error;

			rx_buf = dd->temp_buf;
			rx_len = dd->cur_msg_len;
		}
	}
	if (tx_buf != NULL) {
		first_xfr->tx_dma = dma_map_single(dev, tx_buf,
						   tx_len, DMA_TO_DEVICE);
		if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'T', tx_len);
			ret = -ENOMEM;
			goto error;
		}
	}
	if (rx_buf != NULL) {
		dma_addr_t dma_handle;
		dma_handle = dma_map_single(dev, rx_buf,
					    rx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(NULL, dma_handle)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'R', rx_len);
			if (tx_buf != NULL)
				dma_unmap_single(NULL, first_xfr->tx_dma,
						 tx_len, DMA_TO_DEVICE);
			ret = -ENOMEM;
			goto error;
		}
		if (dd->multi_xfr)
			nxt_xfr->rx_dma = dma_handle;
		else
			first_xfr->rx_dma = dma_handle;
	}
	return 0;

error:
	kfree(dd->temp_buf);
	dd->temp_buf = NULL;
	return ret;
}

static void msm_spi_unmap_dma_buffers(struct msm_spi *dd)
{
	struct device *dev;
	u32 offset;

	dev = &dd->cur_msg->spi->dev;
	if (dd->cur_msg->is_dma_mapped)
		goto unmap_end;

	if (dd->multi_xfr) {
		if (dd->write_len && !dd->read_len) {
			dma_unmap_single(dev,
					 dd->cur_transfer->tx_dma,
					 dd->cur_msg_len,
					 DMA_TO_DEVICE);
		} else {
			struct spi_transfer *prev_xfr;
			prev_xfr = list_entry(
				   dd->cur_transfer->transfer_list.prev,
				   struct spi_transfer,
				   transfer_list);
			if (dd->cur_transfer->rx_buf) {
				dma_unmap_single(dev,
						 dd->cur_transfer->rx_dma,
						 dd->cur_msg_len,
						 DMA_FROM_DEVICE);
			}
			if (prev_xfr->tx_buf) {
				dma_unmap_single(dev,
						 prev_xfr->tx_dma,
						 prev_xfr->len,
						 DMA_TO_DEVICE);
			}
			if (dd->unaligned_len && dd->read_buf) {
				offset = dd->cur_msg_len - dd->unaligned_len;
				dma_coherent_post_ops();
				memcpy(dd->read_buf + offset, dd->rx_padding,
				       dd->unaligned_len);
				memcpy(dd->cur_transfer->rx_buf,
				       dd->read_buf + prev_xfr->len,
				       dd->cur_transfer->len);
			}
		}
		kfree(dd->temp_buf);
		dd->temp_buf = NULL;
		return;
	} else {
		if (dd->cur_transfer->rx_buf)
			dma_unmap_single(dev, dd->cur_transfer->rx_dma,
					 dd->cur_transfer->len,
					 DMA_FROM_DEVICE);
		if (dd->cur_transfer->tx_buf)
			dma_unmap_single(dev, dd->cur_transfer->tx_dma,
					 dd->cur_transfer->len,
					 DMA_TO_DEVICE);
	}

unmap_end:
	/* If we padded the transfer, we copy it from the padding buf */
	if (dd->unaligned_len && dd->read_buf) {
		offset = dd->cur_transfer->len - dd->unaligned_len;
		dma_coherent_post_ops();
		memcpy(dd->read_buf + offset, dd->rx_padding,
		       dd->unaligned_len);
	}
}

/**
 * msm_use_dm - decides whether to use data mover for this
 * transfer
 * @dd: device
 * @tr: transfer
 *
 * Start using DM if:
 * 1. Transfer is longer than 3*block size.
 * 2. Buffers are aligned to cache line.
 * 3. For WR-RD or WR-WR transfers, if conditions (1) and (2) above are met.
 */
static inline int msm_use_dm(struct msm_spi *dd, struct spi_transfer *tr,
			     u8 bpw)
{
	u32 cache_line = dma_get_cache_alignment();

	if (!dd->use_dma)
		return 0;

	if (dd->cur_msg_len < 3*dd->input_block_size)
		return 0;

	if (dd->multi_xfr && !dd->read_len && !dd->write_len)
		return 0;

	if (tr->tx_buf) {
		if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
			return 0;
	}
	if (tr->rx_buf) {
		if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
			return 0;
	}

	/* With cs_change, DM is only used for 8/16/32 bits per word */
	if (tr->cs_change &&
	   ((bpw != 8) && (bpw != 16) && (bpw != 32)))
		return 0;
	return 1;
}

static void msm_spi_process_transfer(struct msm_spi *dd)
{
	u8  bpw;
	u32 spi_ioc;
	u32 spi_iom;
	u32 spi_ioc_orig;
	u32 max_speed;
	u32 chip_select;
	u32 read_count;
	u32 timeout;
	u32 int_loopback = 0;

	dd->tx_bytes_remaining = dd->cur_msg_len;
	dd->rx_bytes_remaining = dd->cur_msg_len;
	dd->read_buf = dd->cur_transfer->rx_buf;
	dd->write_buf = dd->cur_transfer->tx_buf;
	init_completion(&dd->transfer_complete);
	if (dd->cur_transfer->bits_per_word)
		bpw = dd->cur_transfer->bits_per_word;
	else
		if (dd->cur_msg->spi->bits_per_word)
			bpw = dd->cur_msg->spi->bits_per_word;
		else
			bpw = 8;
	dd->bytes_per_word = (bpw + 7) / 8;

	if (dd->cur_transfer->speed_hz)
		max_speed = dd->cur_transfer->speed_hz;
	else
		max_speed = dd->cur_msg->spi->max_speed_hz;
	if (!dd->clock_speed || max_speed != dd->clock_speed)
		msm_spi_clock_set(dd, max_speed);

	read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
	if (dd->cur_msg->spi->mode & SPI_LOOP)
		int_loopback = 1;
	if (int_loopback && dd->multi_xfr &&
			(read_count > dd->input_fifo_size)) {
		if (dd->read_len && dd->write_len)
			pr_err(
			"%s: Internal loopback does not support > fifo size "
			"for write-then-read transactions\n",
			__func__);
		else if (dd->write_len && !dd->read_len)
			pr_err(
			"%s: Internal loopback does not support > fifo size "
			"for write-then-write transactions\n",
			__func__);
		return;
	}
	if (!msm_use_dm(dd, dd->cur_transfer, bpw)) {
		dd->mode = SPI_FIFO_MODE;
		if (dd->multi_xfr) {
			dd->read_len = dd->cur_transfer->len;
			dd->write_len = dd->cur_transfer->len;
		}
		/* read_count cannot exceed fifo_size, and only one READ COUNT
		   interrupt is generated per transaction, so for transactions
		   larger than fifo size READ COUNT must be disabled.
		   For those transactions we usually move to Data Mover mode.
		*/
		if (read_count <= dd->input_fifo_size) {
			writel_relaxed(read_count,
				       dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, read_count);
		} else {
			writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, 0);
		}
	} else {
		dd->mode = SPI_DMOV_MODE;
		if (dd->write_len && dd->read_len) {
			dd->tx_bytes_remaining = dd->write_len;
			dd->rx_bytes_remaining = dd->read_len;
		}
	}

	/* Write mode - fifo or data mover */
	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
	spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
	spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
	spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
	/* Turn on packing for data mover */
	if (dd->mode == SPI_DMOV_MODE)
		spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
	else
		spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
	writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);

	msm_spi_set_config(dd, bpw);

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (dd->cur_msg->spi->mode & SPI_CPOL)
		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
	chip_select = dd->cur_msg->spi->chip_select << 2;
	if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
		spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
	if (!dd->cur_transfer->cs_change)
		spi_ioc |= SPI_IO_C_MX_CS_MODE;
	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);

	if (dd->mode == SPI_DMOV_MODE) {
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
	}
	/* The output fifo interrupt handler will handle all writes after
	   the first. Restricting this to one write avoids contention
	   issues and race conditions between this thread and the int handler
	*/
	else if (dd->mode == SPI_FIFO_MODE) {
		if (msm_spi_prepare_for_write(dd))
			goto transfer_end;
		msm_spi_start_write(dd, read_count);
	}

	/* Only enter the RUN state after the first word is written into
	   the output FIFO. Otherwise, the output FIFO EMPTY interrupt
	   might fire before the first word is written resulting in a
	   possible race condition.
	*/
	if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
		goto transfer_end;

	timeout = 100 * msecs_to_jiffies(
			DIV_ROUND_UP(dd->cur_msg_len * 8,
			DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));

	/* Assume success, this might change later upon transaction result */
	dd->cur_msg->status = 0;
	do {
		if (!wait_for_completion_timeout(&dd->transfer_complete,
						 timeout)) {
			dev_err(dd->dev, "%s: SPI transaction "
					 "timeout\n", __func__);
			dd->cur_msg->status = -EIO;
			if (dd->mode == SPI_DMOV_MODE) {
				msm_dmov_flush(dd->tx_dma_chan, 1);
				msm_dmov_flush(dd->rx_dma_chan, 1);
			}
			break;
		}
	} while (msm_spi_dm_send_next(dd));

transfer_end:
	if (dd->mode == SPI_DMOV_MODE)
		msm_spi_unmap_dma_buffers(dd);
	dd->mode = SPI_MODE_NONE;

	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
		       dd->base + SPI_IO_CONTROL);
}

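/* Compute the total message length and flag multi-transfer messages,
 * recording read_len/write_len for the two-transfer WR-WR/WR-RD case.
 */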
static void get_transfer_length(struct msm_spi *dd)
{
	struct spi_transfer *tr;
	int num_xfrs = 0;
	int readlen = 0;
	int writelen = 0;

	dd->cur_msg_len = 0;
	dd->multi_xfr = 0;
	dd->read_len = dd->write_len = 0;

	list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
		if (tr->tx_buf)
			writelen += tr->len;
		if (tr->rx_buf)
			readlen += tr->len;
		dd->cur_msg_len += tr->len;
		num_xfrs++;
	}

	if (num_xfrs == 2) {
		struct spi_transfer *first_xfr = dd->cur_transfer;

		dd->multi_xfr = 1;
		tr = list_entry(first_xfr->transfer_list.next,
				struct spi_transfer,
				transfer_list);
		/*
		 * We update dd->read_len and dd->write_len only
		 * for WR-WR and WR-RD transfers.
		 */
		if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
			if (((tr->tx_buf) && (!tr->rx_buf)) ||
			    ((!tr->tx_buf) && (tr->rx_buf))) {
				dd->read_len = readlen;
				dd->write_len = writelen;
			}
		}
	} else if (num_xfrs > 1)
		dd->multi_xfr = 1;
}

static inline int combine_transfers(struct msm_spi *dd)
{
	struct spi_transfer *t = dd->cur_transfer;
	struct spi_transfer *nxt;
	int xfrs_grped = 1;

	dd->cur_msg_len = dd->cur_transfer->len;
	while (t->transfer_list.next != &dd->cur_msg->transfers) {
		nxt = list_entry(t->transfer_list.next,
				 struct spi_transfer,
				 transfer_list);
		if (t->cs_change != nxt->cs_change)
			return xfrs_grped;
		dd->cur_msg_len += nxt->len;
		xfrs_grped++;
		t = nxt;
	}
	return xfrs_grped;
}

static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
{
	u32 spi_ioc;
	u32 spi_ioc_orig;

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (set_flag)
		spi_ioc |= SPI_IO_C_FORCE_CS;
	else
		spi_ioc &= ~SPI_IO_C_FORCE_CS;

	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
}

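/* Request the chip-select GPIO if needed, then run the message's transfers:
 * per-transfer with FORCE_CS handling on QUP v2, or through the grouped
 * FIFO/DM paths on older cores.
 */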
static void msm_spi_process_message(struct msm_spi *dd)
{
	int xfrs_grped = 0;
	int cs_num;
	int rc;

	dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
	cs_num = dd->cur_msg->spi->chip_select;
	if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
		(!(dd->cs_gpios[cs_num].valid)) &&
		(dd->cs_gpios[cs_num].gpio_num >= 0)) {
		rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
				spi_cs_rsrcs[cs_num]);
		if (rc) {
			dev_err(dd->dev, "gpio_request for pin %d failed with "
				"error %d\n", dd->cs_gpios[cs_num].gpio_num,
				rc);
			return;
		}
		dd->cs_gpios[cs_num].valid = 1;
	}

	if (dd->qup_ver) {
		write_force_cs(dd, 0);
		list_for_each_entry(dd->cur_transfer,
				    &dd->cur_msg->transfers,
				    transfer_list) {
			struct spi_transfer *t = dd->cur_transfer;
			struct spi_transfer *nxt;

			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				nxt = list_entry(t->transfer_list.next,
						 struct spi_transfer,
						 transfer_list);

				if (t->cs_change == nxt->cs_change)
					write_force_cs(dd, 1);
				else
					write_force_cs(dd, 0);
			}

			dd->cur_msg_len = dd->cur_transfer->len;
			msm_spi_process_transfer(dd);
		}
	} else {
		dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
						    struct spi_transfer,
						    transfer_list);
		get_transfer_length(dd);
		if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
			/*
			 * Handling of multi-transfers.
			 * FIFO mode is used by default
			 */
			list_for_each_entry(dd->cur_transfer,
					    &dd->cur_msg->transfers,
					    transfer_list) {
				if (!dd->cur_transfer->len)
					goto error;
				if (xfrs_grped) {
					xfrs_grped--;
					continue;
				} else {
					dd->read_len = dd->write_len = 0;
					xfrs_grped = combine_transfers(dd);
				}

				dd->cur_tx_transfer = dd->cur_transfer;
				dd->cur_rx_transfer = dd->cur_transfer;
				msm_spi_process_transfer(dd);
				xfrs_grped--;
			}
		} else {
			/* Handling of a single transfer or
			 * WR-WR or WR-RD transfers
			 */
			if ((!dd->cur_msg->is_dma_mapped) &&
			    (msm_use_dm(dd, dd->cur_transfer,
					dd->cur_transfer->bits_per_word))) {
				/* Mapping of DMA buffers */
				int ret = msm_spi_map_dma_buffers(dd);
				if (ret < 0) {
					dd->cur_msg->status = ret;
					goto error;
				}
			}

			dd->cur_tx_transfer = dd->cur_transfer;
			dd->cur_rx_transfer = dd->cur_transfer;
			msm_spi_process_transfer(dd);
		}
	}

	return;

error:
	if (dd->cs_gpios[cs_num].valid) {
		gpio_free(dd->cs_gpios[cs_num].gpio_num);
		dd->cs_gpios[cs_num].valid = 0;
	}
}

/* workqueue - pull messages from queue & process */
static void msm_spi_workq(struct work_struct *work)
{
	struct msm_spi *dd =
		container_of(work, struct msm_spi, work_data);
	unsigned long flags;
	u32 status_error = 0;

	mutex_lock(&dd->core_lock);

	/* Don't allow power collapse until we release mutex */
	if (pm_qos_request_active(&qos_req_list))
		pm_qos_update_request(&qos_req_list,
				      dd->pm_lat);
	if (dd->use_rlock)
		remote_mutex_lock(&dd->r_lock);

	clk_prepare_enable(dd->clk);
	clk_prepare_enable(dd->pclk);
	msm_spi_enable_irqs(dd);

	if (!msm_spi_is_valid_state(dd)) {
		dev_err(dd->dev, "%s: SPI operational state not valid\n",
			__func__);
		status_error = 1;
	}

	spin_lock_irqsave(&dd->queue_lock, flags);
	while (!list_empty(&dd->queue)) {
		dd->cur_msg = list_entry(dd->queue.next,
					 struct spi_message, queue);
		list_del_init(&dd->cur_msg->queue);
		spin_unlock_irqrestore(&dd->queue_lock, flags);
		if (status_error)
			dd->cur_msg->status = -EIO;
		else
			msm_spi_process_message(dd);
		if (dd->cur_msg->complete)
			dd->cur_msg->complete(dd->cur_msg->context);
		spin_lock_irqsave(&dd->queue_lock, flags);
	}
	dd->transfer_pending = 0;
	spin_unlock_irqrestore(&dd->queue_lock, flags);

	msm_spi_disable_irqs(dd);
	clk_disable_unprepare(dd->clk);
	clk_disable_unprepare(dd->pclk);

	if (dd->use_rlock)
		remote_mutex_unlock(&dd->r_lock);

	if (pm_qos_request_active(&qos_req_list))
		pm_qos_update_request(&qos_req_list,
				      PM_QOS_DEFAULT_VALUE);

	mutex_unlock(&dd->core_lock);
	/* If needed, this can be done after the current message is complete,
	   and work can be continued upon resume. No motivation for now. */
	if (dd->suspended)
		wake_up_interruptible(&dd->continue_suspend);
}

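/* spi_master transfer callback: validate the message parameters, queue the
 * message and kick the workqueue that processes it.
 */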
static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct msm_spi *dd;
	unsigned long flags;
	struct spi_transfer *tr;

	dd = spi_master_get_devdata(spi->master);
	if (dd->suspended)
		return -EBUSY;

	if (list_empty(&msg->transfers) || !msg->complete)
		return -EINVAL;

	list_for_each_entry(tr, &msg->transfers, transfer_list) {
		/* Check message parameters */
		if (tr->speed_hz > dd->pdata->max_clock_speed ||
		    (tr->bits_per_word &&
		     (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
		    (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
			dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw, "
					   "tx=%p, rx=%p\n",
					   tr->speed_hz, tr->bits_per_word,
					   tr->tx_buf, tr->rx_buf);
			return -EINVAL;
		}
	}

	spin_lock_irqsave(&dd->queue_lock, flags);
	if (dd->suspended) {
		spin_unlock_irqrestore(&dd->queue_lock, flags);
		return -EBUSY;
	}
	dd->transfer_pending = 1;
	list_add_tail(&msg->queue, &dd->queue);
	spin_unlock_irqrestore(&dd->queue_lock, flags);
	queue_work(dd->workqueue, &dd->work_data);
	return 0;
}

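/* spi_master setup callback: program chip-select polarity, clock polarity,
 * clock phase and loopback bits for this SPI device.
 */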
static int msm_spi_setup(struct spi_device *spi)
{
	struct msm_spi *dd;
	int rc = 0;
	u32 spi_ioc;
	u32 spi_config;
	u32 mask;

	if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
		dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
			__func__, spi->bits_per_word);
		rc = -EINVAL;
	}
	if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
		dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
			__func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
		rc = -EINVAL;
	}

	if (rc)
		goto err_setup_exit;

	dd = spi_master_get_devdata(spi->master);

	mutex_lock(&dd->core_lock);
	if (dd->suspended) {
		mutex_unlock(&dd->core_lock);
		return -EBUSY;
	}

	if (dd->use_rlock)
		remote_mutex_lock(&dd->r_lock);

	clk_prepare_enable(dd->clk);
	clk_prepare_enable(dd->pclk);

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
	if (spi->mode & SPI_CS_HIGH)
		spi_ioc |= mask;
	else
		spi_ioc &= ~mask;
	if (spi->mode & SPI_CPOL)
		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;

	writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);

	spi_config = readl_relaxed(dd->base + SPI_CONFIG);
	if (spi->mode & SPI_LOOP)
		spi_config |= SPI_CFG_LOOPBACK;
	else
		spi_config &= ~SPI_CFG_LOOPBACK;
	if (spi->mode & SPI_CPHA)
		spi_config &= ~SPI_CFG_INPUT_FIRST;
	else
		spi_config |= SPI_CFG_INPUT_FIRST;
	writel_relaxed(spi_config, dd->base + SPI_CONFIG);

	/* Ensure previous write completed before disabling the clocks */
	mb();
	clk_disable_unprepare(dd->clk);
	clk_disable_unprepare(dd->pclk);

	if (dd->use_rlock)
		remote_mutex_unlock(&dd->r_lock);
	mutex_unlock(&dd->core_lock);

err_setup_exit:
	return rc;
}

#ifdef CONFIG_DEBUG_FS
static int debugfs_iomem_x32_set(void *data, u64 val)
{
	writel_relaxed(val, data);
	/* Ensure the previous write completed. */
	mb();
	return 0;
}

static int debugfs_iomem_x32_get(void *data, u64 *val)
{
	*val = readl_relaxed(data);
	/* Ensure the previous read completed. */
	mb();
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
			debugfs_iomem_x32_set, "0x%08llx\n");

static void spi_debugfs_init(struct msm_spi *dd)
{
	dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
	if (dd->dent_spi) {
		int i;

		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
			dd->debugfs_spi_regs[i] =
			   debugfs_create_file(
			       debugfs_spi_regs[i].name,
			       debugfs_spi_regs[i].mode,
			       dd->dent_spi,
			       dd->base + debugfs_spi_regs[i].offset,
			       &fops_iomem_x32);
		}
	}
}

static void spi_debugfs_exit(struct msm_spi *dd)
{
	if (dd->dent_spi) {
		int i;

		debugfs_remove_recursive(dd->dent_spi);
		dd->dent_spi = NULL;
		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
			dd->debugfs_spi_regs[i] = NULL;
	}
}
#else
static void spi_debugfs_init(struct msm_spi *dd) {}
static void spi_debugfs_exit(struct msm_spi *dd) {}
#endif

/* ===Device attributes begin=== */
static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct msm_spi *dd = spi_master_get_devdata(master);

	return snprintf(buf, PAGE_SIZE,
			"Device %s\n"
			"rx fifo_size = %d spi words\n"
			"tx fifo_size = %d spi words\n"
			"use_dma ? %s\n"
			"rx block size = %d bytes\n"
			"tx block size = %d bytes\n"
			"burst size = %d bytes\n"
			"DMA configuration:\n"
			"tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
			"--statistics--\n"
			"Rx isrs = %d\n"
			"Tx isrs = %d\n"
			"DMA error = %d\n"
			"--debug--\n"
			"NA yet\n",
			dev_name(dev),
			dd->input_fifo_size,
			dd->output_fifo_size,
			dd->use_dma ? "yes" : "no",
			dd->input_block_size,
			dd->output_block_size,
			dd->burst_size,
			dd->tx_dma_chan,
			dd->rx_dma_chan,
			dd->tx_dma_crci,
			dd->rx_dma_crci,
			dd->stat_rx + dd->stat_dmov_rx,
			dd->stat_tx + dd->stat_dmov_tx,
			dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
			);
}

/* Reset statistics on write */
static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct msm_spi *dd = dev_get_drvdata(dev);
	dd->stat_rx = 0;
	dd->stat_tx = 0;
	dd->stat_dmov_rx = 0;
	dd->stat_dmov_tx = 0;
	dd->stat_dmov_rx_err = 0;
	dd->stat_dmov_tx_err = 0;
	return count;
}

static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);

static struct attribute *dev_attrs[] = {
	&dev_attr_stats.attr,
	NULL,
};

static struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
/* ===Device attributes end=== */

/**
 * spi_dmov_tx_complete_func - DataMover tx completion callback
 *
 * Executed in IRQ context (Data Mover's IRQ) with DataMover's
 * spinlock @msm_dmov_lock held.
 */
static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
				      unsigned int result,
				      struct msm_dmov_errdata *err)
{
	struct msm_spi *dd;

	if (!(result & DMOV_RSLT_VALID)) {
		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
		return;
	}
	/* restore original context */
	dd = container_of(cmd, struct msm_spi, tx_hdr);
	if (result & DMOV_RSLT_DONE) {
		dd->stat_dmov_tx++;
		if ((atomic_inc_return(&dd->tx_irq_called) == 1))
			return;
		complete(&dd->transfer_complete);
	} else {
		/* Error or flush */
		if (result & DMOV_RSLT_ERROR) {
			dev_err(dd->dev, "DMA error (0x%08x)\n", result);
			dd->stat_dmov_tx_err++;
		}
		if (result & DMOV_RSLT_FLUSH) {
			/*
			 * Flushing normally happens in process of
			 * removing, when we are waiting for outstanding
			 * DMA commands to be flushed.
			 */
			dev_info(dd->dev,
				 "DMA channel flushed (0x%08x)\n", result);
		}
		if (err)
			dev_err(dd->dev,
				"Flush data(%08x %08x %08x %08x %08x %08x)\n",
				err->flush[0], err->flush[1], err->flush[2],
				err->flush[3], err->flush[4], err->flush[5]);
		dd->cur_msg->status = -EIO;
		complete(&dd->transfer_complete);
	}
}

/**
 * spi_dmov_rx_complete_func - DataMover rx completion callback
 *
 * Executed in IRQ context (Data Mover's IRQ) with
 * DataMover's spinlock @msm_dmov_lock held.
 */
static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
				      unsigned int result,
				      struct msm_dmov_errdata *err)
{
	struct msm_spi *dd;

	if (!(result & DMOV_RSLT_VALID)) {
		pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)",
		       result, cmd);
		return;
	}
	/* restore original context */
	dd = container_of(cmd, struct msm_spi, rx_hdr);
	if (result & DMOV_RSLT_DONE) {
		dd->stat_dmov_rx++;
		if (atomic_inc_return(&dd->rx_irq_called) == 1)
			return;
		complete(&dd->transfer_complete);
	} else {
		/* Error or flush */
		if (result & DMOV_RSLT_ERROR) {
			dev_err(dd->dev, "DMA error(0x%08x)\n", result);
			dd->stat_dmov_rx_err++;
		}
		if (result & DMOV_RSLT_FLUSH) {
			dev_info(dd->dev,
				 "DMA channel flushed(0x%08x)\n", result);
		}
		if (err)
			dev_err(dd->dev,
				"Flush data(%08x %08x %08x %08x %08x %08x)\n",
				err->flush[0], err->flush[1], err->flush[2],
				err->flush[3], err->flush[4], err->flush[5]);
		dd->cur_msg->status = -EIO;
		complete(&dd->transfer_complete);
	}
}

static inline u32 get_chunk_size(struct msm_spi *dd)
{
	u32 cache_line = dma_get_cache_alignment();

	return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
		roundup(dd->burst_size, cache_line))*2;
}

static void msm_spi_teardown_dma(struct msm_spi *dd)
{
	int limit = 0;

	if (!dd->use_dma)
		return;

	while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
		msm_dmov_flush(dd->tx_dma_chan, 1);
		msm_dmov_flush(dd->rx_dma_chan, 1);
		msleep(10);
	}

	dma_free_coherent(NULL, get_chunk_size(dd), dd->tx_dmov_cmd,
			  dd->tx_dmov_cmd_dma);
	dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
	dd->tx_padding = dd->rx_padding = NULL;
}

static __init int msm_spi_init_dma(struct msm_spi *dd)
{
	dmov_box *box;
	u32 cache_line = dma_get_cache_alignment();

	/* Allocate all as one chunk, since all is smaller than page size */

	/* We send a NULL device, since it requires coherent_dma_mask in the
	   device definition; we're okay with using the system pool */
	dd->tx_dmov_cmd = dma_alloc_coherent(NULL, get_chunk_size(dd),
					     &dd->tx_dmov_cmd_dma, GFP_KERNEL);
	if (dd->tx_dmov_cmd == NULL)
		return -ENOMEM;

	/* DMA addresses should be 64-bit aligned */
	dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
			  ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
	dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
			      sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);

	/* Buffers should be aligned to cache line */
	dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
	dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
			      sizeof(struct spi_dmov_cmd), cache_line);
	dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding + dd->burst_size),
				     cache_line);
	dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->burst_size,
				   cache_line);

	/* Setup DM commands */
	box = &(dd->rx_dmov_cmd->box);
	box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
	box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
	dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
			    DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
			    offsetof(struct spi_dmov_cmd, cmd_ptr));
	dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;

	box = &(dd->tx_dmov_cmd->box);
	box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
	box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
	dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
			    DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
			    offsetof(struct spi_dmov_cmd, cmd_ptr));
	dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;

	dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
					  CMD_DST_CRCI(dd->tx_dma_crci);
	dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
					  SPI_OUTPUT_FIFO;
	dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
					  CMD_SRC_CRCI(dd->rx_dma_crci);
	dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
					  SPI_INPUT_FIFO;

	/* Clear remaining activities on channel */
	msm_dmov_flush(dd->tx_dma_chan, 1);
	msm_dmov_flush(dd->rx_dma_chan, 1);

	return 0;
}

struct msm_spi_platform_data *msm_spi_dt_to_pdata(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct msm_spi_platform_data *pdata;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		pr_err("Unable to allocate platform data\n");
		return NULL;
	}

	of_property_read_u32(node, "spi-max-frequency",
			     &pdata->max_clock_speed);

	return pdata;
}

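/* Probe: allocate the SPI master and gather GPIO, memory and DMA resources
 * from the device tree or from platform data.
 */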
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001772static int __init msm_spi_probe(struct platform_device *pdev)
1773{
1774 struct spi_master *master;
1775 struct msm_spi *dd;
1776 struct resource *resource;
1777 int rc = -ENXIO;
1778 int locked = 0;
1779 int i = 0;
1780 int clk_enabled = 0;
1781 int pclk_enabled = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001782 struct msm_spi_platform_data *pdata;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001783 enum of_gpio_flags flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001784
1785 master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
1786 if (!master) {
1787 rc = -ENOMEM;
1788 dev_err(&pdev->dev, "master allocation failed\n");
1789 goto err_probe_exit;
1790 }
1791
1792 master->bus_num = pdev->id;
1793 master->mode_bits = SPI_SUPPORTED_MODES;
1794 master->num_chipselect = SPI_NUM_CHIPSELECTS;
1795 master->setup = msm_spi_setup;
1796 master->transfer = msm_spi_transfer;
1797 platform_set_drvdata(pdev, master);
1798 dd = spi_master_get_devdata(master);
1799
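	/*
	 * Device-tree targets run the BFAM version of the QUP core and
	 * describe their GPIOs in the node; legacy targets pass platform
	 * data and list the GPIOs as IORESOURCE_IO resources.
	 */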
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001800 if (pdev->dev.of_node) {
1801 dd->qup_ver = SPI_QUP_VERSION_BFAM;
1802 master->dev.of_node = pdev->dev.of_node;
1803 pdata = msm_spi_dt_to_pdata(pdev);
1804 if (!pdata) {
1805 rc = -ENOMEM;
1806 goto err_probe_exit;
1807 }
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001808
1809 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
1810 dd->spi_gpios[i] = of_get_gpio_flags(pdev->dev.of_node,
1811 i, &flags);
1812 }
1813
1814 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
1815 dd->cs_gpios[i].gpio_num = of_get_named_gpio_flags(
1816 pdev->dev.of_node, "cs-gpios",
1817 i, &flags);
1818 dd->cs_gpios[i].valid = 0;
1819 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001820 } else {
1821 pdata = pdev->dev.platform_data;
1822 dd->qup_ver = SPI_QUP_VERSION_NONE;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001823
1824 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
1825 resource = platform_get_resource(pdev, IORESOURCE_IO,
1826 i);
1827 dd->spi_gpios[i] = resource ? resource->start : -1;
1828 }
1829
1830 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
1831 resource = platform_get_resource(pdev, IORESOURCE_IO,
1832 i + ARRAY_SIZE(spi_rsrcs));
1833 dd->cs_gpios[i].gpio_num = resource ?
1834 resource->start : -1;
1835 dd->cs_gpios[i].valid = 0;
1836 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001837 }
1838
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001839 dd->pdata = pdata;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001840 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001841 if (!resource) {
1842 rc = -ENXIO;
1843 goto err_probe_res;
1844 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001845
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001846 dd->mem_phys_addr = resource->start;
1847 dd->mem_size = resource_size(resource);
1848
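	/*
	 * Data-mover support is optional: if the board's dma_config() hook
	 * fails, fall back to PIO and skip claiming DMA channels and CRCIs.
	 */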
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001849 if (pdata) {
1850 if (pdata->dma_config) {
1851 rc = pdata->dma_config();
1852 if (rc) {
1853 dev_warn(&pdev->dev,
1854 "%s: DM mode not supported\n",
1855 __func__);
1856 dd->use_dma = 0;
1857 goto skip_dma_resources;
1858 }
1859 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001860 resource = platform_get_resource(pdev, IORESOURCE_DMA, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001861 if (resource) {
1862 dd->rx_dma_chan = resource->start;
1863 dd->tx_dma_chan = resource->end;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001864 resource = platform_get_resource(pdev, IORESOURCE_DMA,
1865 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001866 if (!resource) {
1867 rc = -ENXIO;
1868 goto err_probe_res;
1869 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001870
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001871 dd->rx_dma_crci = resource->start;
1872 dd->tx_dma_crci = resource->end;
1873 dd->use_dma = 1;
1874 master->dma_alignment = dma_get_cache_alignment();
1875 }
1876
1877skip_dma_resources:
1878 if (pdata->gpio_config) {
1879 rc = pdata->gpio_config();
1880 if (rc) {
1881 dev_err(&pdev->dev,
1882 "%s: error configuring GPIOs\n",
1883 __func__);
1884 goto err_probe_gpio;
1885 }
1886 }
1887 }
1888
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001889 rc = msm_spi_request_gpios(dd);
1890 if (rc)
1891 goto err_probe_gpio;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001892
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001893 spin_lock_init(&dd->queue_lock);
1894 mutex_init(&dd->core_lock);
1895 INIT_LIST_HEAD(&dd->queue);
1896 INIT_WORK(&dd->work_data, msm_spi_workq);
1897 init_waitqueue_head(&dd->continue_suspend);
1898 dd->workqueue = create_singlethread_workqueue(
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001899 dev_name(master->dev.parent));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001900	if (!dd->workqueue) {
1901 		rc = -ENOMEM;
 		goto err_probe_workq;
 	}
1902
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001903 if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
1904 dd->mem_size, SPI_DRV_NAME)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001905 rc = -ENXIO;
1906 goto err_probe_reqmem;
1907 }
1908
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001909 dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
1910 if (!dd->base) {
1911 rc = -ENOMEM;
1912 goto err_probe_reqmem;
1913 }
1914
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001915 if (pdata && pdata->rsl_id) {
1916 struct remote_mutex_id rmid;
1917 rmid.r_spinlock_id = pdata->rsl_id;
1918 rmid.delay_us = SPI_TRYLOCK_DELAY;
1919
1920 rc = remote_mutex_init(&dd->r_lock, &rmid);
1921 if (rc) {
1922 			dev_err(&pdev->dev,
1923 				"%s: unable to init remote_mutex (%s), (rc=%d)\n",
1924 				__func__, rmid.r_spinlock_id, rc);
1925 goto err_probe_rlock_init;
1926 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001927
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001928 dd->use_rlock = 1;
1929 dd->pm_lat = pdata->pm_lat;
1930 pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
1931 PM_QOS_DEFAULT_VALUE);
1932 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001933
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001934 mutex_lock(&dd->core_lock);
1935 if (dd->use_rlock)
1936 remote_mutex_lock(&dd->r_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001937
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001938 locked = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001939 dd->dev = &pdev->dev;
Matt Wagantallac294852011-08-17 15:44:58 -07001940 dd->clk = clk_get(&pdev->dev, "core_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001941 if (IS_ERR(dd->clk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07001942 dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001943 rc = PTR_ERR(dd->clk);
1944 goto err_probe_clk_get;
1945 }
1946
Matt Wagantallac294852011-08-17 15:44:58 -07001947 dd->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001948 if (IS_ERR(dd->pclk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07001949 dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001950 rc = PTR_ERR(dd->pclk);
1951 goto err_probe_pclk_get;
1952 }
1953
1954 if (pdata && pdata->max_clock_speed)
1955 msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
1956
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001957 rc = clk_prepare_enable(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001958 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07001959 dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001960 __func__);
1961 goto err_probe_clk_enable;
1962 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001963
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001964 clk_enabled = 1;
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001965 rc = clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001966 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07001967 dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001968 __func__);
1969 goto err_probe_pclk_enable;
1970 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001971
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001972 pclk_enabled = 1;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001973 rc = msm_spi_configure_gsbi(dd, pdev);
1974 if (rc)
1975 goto err_probe_gsbi;
1976
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001977 msm_spi_calculate_fifo_size(dd);
1978 if (dd->use_dma) {
1979 rc = msm_spi_init_dma(dd);
1980 if (rc)
1981 goto err_probe_dma;
1982 }
1983
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001984 msm_spi_register_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001985 /*
1986 * The SPI core generates a bogus input overrun error on some targets,
1987 * when a transition from run to reset state occurs and if the FIFO has
1988 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
1989 * bit.
1990 */
1991 msm_spi_enable_error_flags(dd);
1992
1993 writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
1994 rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
1995 if (rc)
1996 goto err_probe_state;
1997
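	/* Controller setup is done; gate the clocks until they are needed. */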
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001998 clk_disable_unprepare(dd->clk);
1999 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002000 clk_enabled = 0;
2001 pclk_enabled = 0;
2002
2003 dd->suspended = 0;
2004 dd->transfer_pending = 0;
2005 dd->multi_xfr = 0;
2006 dd->mode = SPI_MODE_NONE;
2007
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002008 rc = msm_spi_request_irq(dd, pdev, master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002009 if (rc)
2010 goto err_probe_irq;
2011
2012 msm_spi_disable_irqs(dd);
2013 if (dd->use_rlock)
2014 remote_mutex_unlock(&dd->r_lock);
2015
2016 mutex_unlock(&dd->core_lock);
2017 locked = 0;
2018
2019 rc = spi_register_master(master);
2020 if (rc)
2021 goto err_probe_reg_master;
2022
2023 rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
2024 if (rc) {
2025 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
2026 goto err_attrs;
2027 }
2028
2029 spi_debugfs_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002030 return 0;
2031
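	/* Error unwind: each label releases what was acquired before the failing step. */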
2032err_attrs:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002033 spi_unregister_master(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002034err_probe_reg_master:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002035err_probe_irq:
2036err_probe_state:
2037 msm_spi_teardown_dma(dd);
2038err_probe_dma:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002039err_probe_gsbi:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002040 if (pclk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002041 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002042err_probe_pclk_enable:
2043 if (clk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002044 clk_disable_unprepare(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002045err_probe_clk_enable:
2046 clk_put(dd->pclk);
2047err_probe_pclk_get:
2048 clk_put(dd->clk);
2049err_probe_clk_get:
2050 if (locked) {
2051 if (dd->use_rlock)
2052 remote_mutex_unlock(&dd->r_lock);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002053
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002054 mutex_unlock(&dd->core_lock);
2055 }
2056err_probe_rlock_init:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002057err_probe_reqmem:
2058 destroy_workqueue(dd->workqueue);
2059err_probe_workq:
2060 msm_spi_free_gpios(dd);
2061err_probe_gpio:
2062 if (pdata && pdata->gpio_release)
2063 pdata->gpio_release();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002064err_probe_res:
2065 spi_master_put(master);
2066err_probe_exit:
2067 return rc;
2068}
2069
2070#ifdef CONFIG_PM
2071static int msm_spi_suspend(struct platform_device *pdev, pm_message_t state)
2072{
2073 struct spi_master *master = platform_get_drvdata(pdev);
2074 struct msm_spi *dd;
2075 unsigned long flags;
2076
2077 if (!master)
2078 goto suspend_exit;
2079 dd = spi_master_get_devdata(master);
2080 if (!dd)
2081 goto suspend_exit;
2082
2083 /* Make sure nothing is added to the queue while we're suspending */
2084 spin_lock_irqsave(&dd->queue_lock, flags);
2085 dd->suspended = 1;
2086 spin_unlock_irqrestore(&dd->queue_lock, flags);
2087
2088 /* Wait for transactions to end, or time out */
2089 wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending);
2090 msm_spi_free_gpios(dd);
2091
2092suspend_exit:
2093 return 0;
2094}
2095
2096static int msm_spi_resume(struct platform_device *pdev)
2097{
2098 struct spi_master *master = platform_get_drvdata(pdev);
2099 struct msm_spi *dd;
2100
2101 if (!master)
2102 goto resume_exit;
2103 dd = spi_master_get_devdata(master);
2104 if (!dd)
2105 goto resume_exit;
2106
2107 BUG_ON(msm_spi_request_gpios(dd) != 0);
2108 dd->suspended = 0;
2109resume_exit:
2110 return 0;
2111}
2112#else
2113#define msm_spi_suspend NULL
2114#define msm_spi_resume NULL
2115#endif /* CONFIG_PM */
2116
2117static int __devexit msm_spi_remove(struct platform_device *pdev)
2118{
2119 struct spi_master *master = platform_get_drvdata(pdev);
2120 struct msm_spi *dd = spi_master_get_devdata(master);
2121 struct msm_spi_platform_data *pdata = pdev->dev.platform_data;
2122
2123 pm_qos_remove_request(&qos_req_list);
2124 spi_debugfs_exit(dd);
2125 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
2126
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002127 msm_spi_teardown_dma(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002128 if (pdata && pdata->gpio_release)
2129 pdata->gpio_release();
2130
2131 msm_spi_free_gpios(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002132 clk_put(dd->clk);
2133 clk_put(dd->pclk);
2134 destroy_workqueue(dd->workqueue);
2135 	platform_set_drvdata(pdev, NULL);
2136 spi_unregister_master(master);
2137 spi_master_put(master);
2138
2139 return 0;
2140}
2141
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002142static const struct of_device_id msm_spi_dt_match[] = {
2143 {
2144 .compatible = "qcom,spi-qup-v2",
2145 },
2146 {}
2147};
2148
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002149static struct platform_driver msm_spi_driver = {
2150 .driver = {
2151 .name = SPI_DRV_NAME,
2152 .owner = THIS_MODULE,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002153 .of_match_table = msm_spi_dt_match,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002154 },
2155 .suspend = msm_spi_suspend,
2156 .resume = msm_spi_resume,
2157 	.remove		= __devexit_p(msm_spi_remove),
2158};
2159
2160static int __init msm_spi_init(void)
2161{
2162 return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
2163}
2164module_init(msm_spi_init);
2165
2166static void __exit msm_spi_exit(void)
2167{
2168 platform_driver_unregister(&msm_spi_driver);
2169}
2170module_exit(msm_spi_exit);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002171
2172MODULE_LICENSE("GPL v2");
2173MODULE_VERSION("0.4");
2174MODULE_ALIAS("platform:"SPI_DRV_NAME);