blob: 82dddf83daf741daea088d471f6d52cc82920d94 [file] [log] [blame]
Sandeep Paulraj358934a2009-12-16 22:02:18 +00001/*
2 * Copyright (C) 2009 Texas Instruments.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/interrupt.h>
20#include <linux/io.h>
21#include <linux/gpio.h>
22#include <linux/module.h>
23#include <linux/delay.h>
24#include <linux/platform_device.h>
25#include <linux/err.h>
26#include <linux/clk.h>
27#include <linux/dma-mapping.h>
28#include <linux/spi/spi.h>
29#include <linux/spi/spi_bitbang.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090030#include <linux/slab.h>
Sandeep Paulraj358934a2009-12-16 22:02:18 +000031
32#include <mach/spi.h>
33#include <mach/edma.h>
34
35#define SPI_NO_RESOURCE ((resource_size_t)-1)
36
37#define SPI_MAX_CHIPSELECT 2
38
39#define CS_DEFAULT 0xFF
40
41#define SPI_BUFSIZ (SMP_CACHE_BYTES + 1)
42#define DAVINCI_DMA_DATA_TYPE_S8 0x01
43#define DAVINCI_DMA_DATA_TYPE_S16 0x02
44#define DAVINCI_DMA_DATA_TYPE_S32 0x04
45
46#define SPIFMT_PHASE_MASK BIT(16)
47#define SPIFMT_POLARITY_MASK BIT(17)
48#define SPIFMT_DISTIMER_MASK BIT(18)
49#define SPIFMT_SHIFTDIR_MASK BIT(20)
50#define SPIFMT_WAITENA_MASK BIT(21)
51#define SPIFMT_PARITYENA_MASK BIT(22)
52#define SPIFMT_ODD_PARITY_MASK BIT(23)
53#define SPIFMT_WDELAY_MASK 0x3f000000u
54#define SPIFMT_WDELAY_SHIFT 24
55#define SPIFMT_CHARLEN_MASK 0x0000001Fu
56
Sandeep Paulraj358934a2009-12-16 22:02:18 +000057
58/* SPIPC0 */
59#define SPIPC0_DIFUN_MASK BIT(11) /* MISO */
60#define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */
61#define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */
62#define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */
Sandeep Paulraj358934a2009-12-16 22:02:18 +000063
64#define SPIINT_MASKALL 0x0101035F
65#define SPI_INTLVL_1 0x000001FFu
66#define SPI_INTLVL_0 0x00000000u
67
Brian Niebuhrcfbc5d12010-08-12 12:27:33 +053068/* SPIDAT1 (upper 16 bit defines) */
69#define SPIDAT1_CSHOLD_MASK BIT(12)
70
71/* SPIGCR1 */
Sandeep Paulraj358934a2009-12-16 22:02:18 +000072#define SPIGCR1_CLKMOD_MASK BIT(1)
73#define SPIGCR1_MASTER_MASK BIT(0)
74#define SPIGCR1_LOOPBACK_MASK BIT(16)
Sekhar Nori8e206f12010-08-20 16:20:49 +053075#define SPIGCR1_SPIENA_MASK BIT(24)
Sandeep Paulraj358934a2009-12-16 22:02:18 +000076
77/* SPIBUF */
78#define SPIBUF_TXFULL_MASK BIT(29)
79#define SPIBUF_RXEMPTY_MASK BIT(31)
80
81/* Error Masks */
82#define SPIFLG_DLEN_ERR_MASK BIT(0)
83#define SPIFLG_TIMEOUT_MASK BIT(1)
84#define SPIFLG_PARERR_MASK BIT(2)
85#define SPIFLG_DESYNC_MASK BIT(3)
86#define SPIFLG_BITERR_MASK BIT(4)
87#define SPIFLG_OVRRUN_MASK BIT(6)
88#define SPIFLG_RX_INTR_MASK BIT(8)
89#define SPIFLG_TX_INTR_MASK BIT(9)
90#define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24)
Sandeep Paulraj358934a2009-12-16 22:02:18 +000091
Sandeep Paulraj358934a2009-12-16 22:02:18 +000092#define SPIINT_BITERR_INTR BIT(4)
93#define SPIINT_OVRRUN_INTR BIT(6)
94#define SPIINT_RX_INTR BIT(8)
95#define SPIINT_TX_INTR BIT(9)
96#define SPIINT_DMA_REQ_EN BIT(16)
Sandeep Paulraj358934a2009-12-16 22:02:18 +000097
98#define SPI_T2CDELAY_SHIFT 16
99#define SPI_C2TDELAY_SHIFT 24
100
101/* SPI Controller registers */
102#define SPIGCR0 0x00
103#define SPIGCR1 0x04
104#define SPIINT 0x08
105#define SPILVL 0x0c
106#define SPIFLG 0x10
107#define SPIPC0 0x14
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000108#define SPIDAT1 0x3c
109#define SPIBUF 0x40
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000110#define SPIDELAY 0x48
111#define SPIDEF 0x4c
112#define SPIFMT0 0x50
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000113
/* Per-chipselect slave state, indexed by spi->chip_select. */
struct davinci_spi_slave {
	u32 cmd_to_write;	/* cleared in davinci_spi_setup(); not otherwise used in this view */
	u32 clk_ctrl_to_write;	/* NOTE(review): not referenced anywhere in this part of the file */
	u32 bytes_per_word;	/* word size in bytes (1 or 2), set by davinci_spi_setup_transfer() */
	u8 active_cs;		/* NOTE(review): not referenced anywhere in this part of the file */
};
120
/* We have 2 DMA channels per CS, one for RX and one for TX */
struct davinci_spi_dma {
	int dma_tx_channel;	/* EDMA channel number; -1 while unallocated */
	int dma_rx_channel;	/* EDMA channel number; -1 while unallocated */
	int dma_tx_sync_dev;	/* EDMA sync event for TX, from platform DMA resources */
	int dma_rx_sync_dev;	/* EDMA sync event for RX, from platform DMA resources */
	enum dma_event_q eventq;	/* EDMA event queue the channels are bound to */

	/* signalled from the EDMA completion callbacks */
	struct completion dma_tx_completion;
	struct completion dma_rx_completion;
};
132
/* SPI Controller driver's private data. */
struct davinci_spi {
	struct spi_bitbang bitbang;	/* spi_bitbang framework state (holds master) */
	struct clk *clk;		/* module clock; clk_get_rate() feeds the prescaler */

	u8 version;			/* SPI_VERSION_1 or SPI_VERSION_2, from pdata */
	resource_size_t pbase;		/* physical register base, used for EDMA src/dst */
	void __iomem *base;		/* ioremapped register base */
	size_t region_size;		/* size of the requested mem region */
	u32 irq;
	struct completion done;

	const void *tx;			/* current TX position, advanced by get_tx() */
	void *rx;			/* current RX position, advanced by get_rx() */
	u8 *tmp_buf;			/* dummy TX source for RX-only DMA transfers */
	int count;			/* transfer length in words during a transfer */
	struct davinci_spi_dma *dma_channels;	/* per-CS DMA state; NULL in PIO mode */
	struct davinci_spi_platform_data *pdata;

	/* word-size-specific FIFO accessors, chosen in davinci_spi_setup_transfer() */
	void (*get_rx)(u32 rx_data, struct davinci_spi *);
	u32 (*get_tx)(struct davinci_spi *);

	struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT];
};
157
/*
 * Non-zero when transfers go through EDMA; copied from pdata->use_dma at
 * probe time and forced back to 0 there if any DMA resource is missing.
 */
static unsigned use_dma;
159
160static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi)
161{
162 u8 *rx = davinci_spi->rx;
163
164 *rx++ = (u8)data;
165 davinci_spi->rx = rx;
166}
167
168static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi)
169{
170 u16 *rx = davinci_spi->rx;
171
172 *rx++ = (u16)data;
173 davinci_spi->rx = rx;
174}
175
176static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi)
177{
178 u32 data;
179 const u8 *tx = davinci_spi->tx;
180
181 data = *tx++;
182 davinci_spi->tx = tx;
183 return data;
184}
185
186static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi)
187{
188 u32 data;
189 const u16 *tx = davinci_spi->tx;
190
191 data = *tx++;
192 davinci_spi->tx = tx;
193 return data;
194}
195
196static inline void set_io_bits(void __iomem *addr, u32 bits)
197{
198 u32 v = ioread32(addr);
199
200 v |= bits;
201 iowrite32(v, addr);
202}
203
204static inline void clear_io_bits(void __iomem *addr, u32 bits)
205{
206 u32 v = ioread32(addr);
207
208 v &= ~bits;
209 iowrite32(v, addr);
210}
211
212static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
213{
214 set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
215}
216
217static inline void clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
218{
219 clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
220}
221
222static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable)
223{
224 struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
225
226 if (enable)
227 set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
228 else
229 clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
230}
231
232/*
233 * Interface to control the chip select signal
234 */
235static void davinci_spi_chipselect(struct spi_device *spi, int value)
236{
237 struct davinci_spi *davinci_spi;
238 struct davinci_spi_platform_data *pdata;
Brian Niebuhr7978b8c2010-08-13 10:11:03 +0530239 u8 chip_sel = spi->chip_select;
Brian Niebuhrcfbc5d12010-08-12 12:27:33 +0530240 u16 spidat1_cfg = CS_DEFAULT;
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000241
242 davinci_spi = spi_master_get_devdata(spi->master);
243 pdata = davinci_spi->pdata;
244
245 /*
246 * Board specific chip select logic decides the polarity and cs
247 * line for the controller
248 */
Brian Niebuhr7978b8c2010-08-13 10:11:03 +0530249 if (value == BITBANG_CS_ACTIVE) {
Brian Niebuhrcfbc5d12010-08-12 12:27:33 +0530250 spidat1_cfg |= SPIDAT1_CSHOLD_MASK;
251 spidat1_cfg &= ~(0x1 << chip_sel);
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000252 }
Brian Niebuhr7978b8c2010-08-13 10:11:03 +0530253
Brian Niebuhrcfbc5d12010-08-12 12:27:33 +0530254 iowrite16(spidat1_cfg, davinci_spi->base + SPIDAT1 + 2);
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000255}
256
/**
 * davinci_spi_setup_transfer - This functions will determine transfer method
 * @spi: spi device on which data transfer to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function determines data transfer method (8/16/32 bit transfer).
 * It will also set the SPI Clock Control register according to
 * SPI slave device freq.
 *
 * Called both per-transfer (with @t) and from davinci_spi_setup()
 * (with @t == NULL, using the device defaults).
 */
static int davinci_spi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{

	struct davinci_spi *davinci_spi;
	u8 bits_per_word = 0;
	u32 hz = 0, prescale = 0, clkspeed;

	davinci_spi = spi_master_get_devdata(spi->master);

	if (t) {
		bits_per_word = t->bits_per_word;
		hz = t->speed_hz;
	}

	/* if bits_per_word is not set then set it default */
	if (!bits_per_word)
		bits_per_word = spi->bits_per_word;

	/*
	 * Assign function pointer to appropriate transfer method
	 * 8bit, 16bit or 32bit transfer
	 */
	if (bits_per_word <= 8 && bits_per_word >= 2) {
		davinci_spi->get_rx = davinci_spi_rx_buf_u8;
		davinci_spi->get_tx = davinci_spi_tx_buf_u8;
		davinci_spi->slave[spi->chip_select].bytes_per_word = 1;
	} else if (bits_per_word <= 16 && bits_per_word >= 2) {
		/* the ">= 2" here is redundant: the first branch already took <= 8 */
		davinci_spi->get_rx = davinci_spi_rx_buf_u16;
		davinci_spi->get_tx = davinci_spi_tx_buf_u16;
		davinci_spi->slave[spi->chip_select].bytes_per_word = 2;
	} else
		return -EINVAL;

	if (!hz)
		hz = spi->max_speed_hz;

	/*
	 * Program the CHARLEN field of SPIFMTn.  NOTE(review): the 0x1f mask
	 * means a 32-bit word length would be written as 0 -- presumably only
	 * 2..16 bit lengths are expected here, as enforced above.
	 */
	clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK,
			spi->chip_select);
	set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f,
			spi->chip_select);

	/*
	 * Compute the 8-bit prescaler (bits 8..15 of SPIFMTn), clamped to
	 * 1 when hz is above clk/2 and 255 when below clk/256.
	 * NOTE(review): the integer division in (clkspeed / hz - 1) rounds
	 * down, which may pick a clock slightly faster than requested --
	 * confirm against the controller's prescaler formula.
	 */
	clkspeed = clk_get_rate(davinci_spi->clk);
	if (hz > clkspeed / 2)
		prescale = 1 << 8;
	if (hz < clkspeed / 256)
		prescale = 255 << 8;
	if (!prescale)
		prescale = ((clkspeed / hz - 1) << 8) & 0x0000ff00;

	clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select);
	set_fmt_bits(davinci_spi->base, prescale, spi->chip_select);

	return 0;
}
321
322static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data)
323{
324 struct spi_device *spi = (struct spi_device *)data;
325 struct davinci_spi *davinci_spi;
326 struct davinci_spi_dma *davinci_spi_dma;
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000327
328 davinci_spi = spi_master_get_devdata(spi->master);
329 davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000330
331 if (ch_status == DMA_COMPLETE)
332 edma_stop(davinci_spi_dma->dma_rx_channel);
333 else
334 edma_clean_channel(davinci_spi_dma->dma_rx_channel);
335
336 complete(&davinci_spi_dma->dma_rx_completion);
337 /* We must disable the DMA RX request */
338 davinci_spi_set_dma_req(spi, 0);
339}
340
341static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data)
342{
343 struct spi_device *spi = (struct spi_device *)data;
344 struct davinci_spi *davinci_spi;
345 struct davinci_spi_dma *davinci_spi_dma;
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000346
347 davinci_spi = spi_master_get_devdata(spi->master);
348 davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000349
350 if (ch_status == DMA_COMPLETE)
351 edma_stop(davinci_spi_dma->dma_tx_channel);
352 else
353 edma_clean_channel(davinci_spi_dma->dma_tx_channel);
354
355 complete(&davinci_spi_dma->dma_tx_completion);
356 /* We must disable the DMA TX request */
357 davinci_spi_set_dma_req(spi, 0);
358}
359
360static int davinci_spi_request_dma(struct spi_device *spi)
361{
362 struct davinci_spi *davinci_spi;
363 struct davinci_spi_dma *davinci_spi_dma;
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000364 struct device *sdev;
365 int r;
366
367 davinci_spi = spi_master_get_devdata(spi->master);
368 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000369 sdev = davinci_spi->bitbang.master->dev.parent;
370
371 r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev,
372 davinci_spi_dma_rx_callback, spi,
373 davinci_spi_dma->eventq);
374 if (r < 0) {
375 dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n");
376 return -EAGAIN;
377 }
378 davinci_spi_dma->dma_rx_channel = r;
379 r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev,
380 davinci_spi_dma_tx_callback, spi,
381 davinci_spi_dma->eventq);
382 if (r < 0) {
383 edma_free_channel(davinci_spi_dma->dma_rx_channel);
384 davinci_spi_dma->dma_rx_channel = -1;
385 dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n");
386 return -EAGAIN;
387 }
388 davinci_spi_dma->dma_tx_channel = r;
389
390 return 0;
391}
392
/**
 * davinci_spi_setup - This functions will set default transfer method
 * @spi: spi device on which data transfer to be done
 *
 * This functions sets the default transfer method.
 *
 * Validates the device's speed range, lazily allocates the per-CS DMA
 * channels, programs the mode-dependent SPIFMTn bits, and finally calls
 * davinci_spi_setup_transfer() with NULL to apply the device defaults.
 */
static int davinci_spi_setup(struct spi_device *spi)
{
	int retval;
	struct davinci_spi *davinci_spi;
	struct davinci_spi_dma *davinci_spi_dma;
	struct device *sdev;

	davinci_spi = spi_master_get_devdata(spi->master);
	sdev = davinci_spi->bitbang.master->dev.parent;

	/* if bits per word length is zero then set it default 8 */
	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	davinci_spi->slave[spi->chip_select].cmd_to_write = 0;

	/* lazily request the EDMA channels the first time this CS is set up */
	if (use_dma && davinci_spi->dma_channels) {
		davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

		if ((davinci_spi_dma->dma_rx_channel == -1)
				|| (davinci_spi_dma->dma_tx_channel == -1)) {
			retval = davinci_spi_request_dma(spi);
			if (retval < 0)
				return retval;
		}
	}

	/*
	 * SPI in DaVinci and DA8xx operate between
	 * 600 KHz and 50 MHz
	 */
	if (spi->max_speed_hz < 600000 || spi->max_speed_hz > 50000000) {
		dev_dbg(sdev, "Operating frequency is not in acceptable "
				"range\n");
		return -EINVAL;
	}

	/*
	 * Set up SPIFMTn register, unique to this chipselect.
	 *
	 * NOTE: we could do all of these with one write. Also, some
	 * of the "version 2" features are found in chips that don't
	 * support all of them...
	 */
	if (spi->mode & SPI_LSB_FIRST)
		set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
				spi->chip_select);
	else
		clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
				spi->chip_select);

	if (spi->mode & SPI_CPOL)
		set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
				spi->chip_select);
	else
		clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
				spi->chip_select);

	/* note the inversion: the PHASE bit is set for CPHA == 0 */
	if (!(spi->mode & SPI_CPHA))
		set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
				spi->chip_select);
	else
		clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
				spi->chip_select);

	/*
	 * Version 1 hardware supports two basic SPI modes:
	 *  - Standard SPI mode uses 4 pins, with chipselect
	 *  - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
	 *	(distinct from SPI_3WIRE, with just one data wire;
	 *	or similar variants without MOSI or without MISO)
	 *
	 * Version 2 hardware supports an optional handshaking signal,
	 * so it can support two more modes:
	 *  - 5 pin SPI variant is standard SPI plus SPI_READY
	 *  - 4 pin with enable is (SPI_READY | SPI_NO_CS)
	 */

	if (davinci_spi->version == SPI_VERSION_2) {
		/* inter-word delay, parity, WAIT and timer bits come from pdata */
		clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK,
				spi->chip_select);
		set_fmt_bits(davinci_spi->base,
				(davinci_spi->pdata->wdelay
					<< SPIFMT_WDELAY_SHIFT)
					& SPIFMT_WDELAY_MASK,
				spi->chip_select);

		if (davinci_spi->pdata->odd_parity)
			set_fmt_bits(davinci_spi->base,
					SPIFMT_ODD_PARITY_MASK,
					spi->chip_select);
		else
			clear_fmt_bits(davinci_spi->base,
					SPIFMT_ODD_PARITY_MASK,
					spi->chip_select);

		if (davinci_spi->pdata->parity_enable)
			set_fmt_bits(davinci_spi->base,
					SPIFMT_PARITYENA_MASK,
					spi->chip_select);
		else
			clear_fmt_bits(davinci_spi->base,
					SPIFMT_PARITYENA_MASK,
					spi->chip_select);

		if (davinci_spi->pdata->wait_enable)
			set_fmt_bits(davinci_spi->base,
					SPIFMT_WAITENA_MASK,
					spi->chip_select);
		else
			clear_fmt_bits(davinci_spi->base,
					SPIFMT_WAITENA_MASK,
					spi->chip_select);

		if (davinci_spi->pdata->timer_disable)
			set_fmt_bits(davinci_spi->base,
					SPIFMT_DISTIMER_MASK,
					spi->chip_select);
		else
			clear_fmt_bits(davinci_spi->base,
					SPIFMT_DISTIMER_MASK,
					spi->chip_select);
	}

	retval = davinci_spi_setup_transfer(spi, NULL);

	return retval;
}
527
528static void davinci_spi_cleanup(struct spi_device *spi)
529{
530 struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
531 struct davinci_spi_dma *davinci_spi_dma;
532
533 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
534
535 if (use_dma && davinci_spi->dma_channels) {
536 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
537
538 if ((davinci_spi_dma->dma_rx_channel != -1)
539 && (davinci_spi_dma->dma_tx_channel != -1)) {
540 edma_free_channel(davinci_spi_dma->dma_tx_channel);
541 edma_free_channel(davinci_spi_dma->dma_rx_channel);
542 }
543 }
544}
545
546static int davinci_spi_bufs_prep(struct spi_device *spi,
547 struct davinci_spi *davinci_spi)
548{
549 int op_mode = 0;
550
551 /*
552 * REVISIT unless devices disagree about SPI_LOOP or
553 * SPI_READY (SPI_NO_CS only allows one device!), this
554 * should not need to be done before each message...
555 * optimize for both flags staying cleared.
556 */
557
558 op_mode = SPIPC0_DIFUN_MASK
559 | SPIPC0_DOFUN_MASK
560 | SPIPC0_CLKFUN_MASK;
561 if (!(spi->mode & SPI_NO_CS))
562 op_mode |= 1 << spi->chip_select;
563 if (spi->mode & SPI_READY)
564 op_mode |= SPIPC0_SPIENA_MASK;
565
566 iowrite32(op_mode, davinci_spi->base + SPIPC0);
567
568 if (spi->mode & SPI_LOOP)
569 set_io_bits(davinci_spi->base + SPIGCR1,
570 SPIGCR1_LOOPBACK_MASK);
571 else
572 clear_io_bits(davinci_spi->base + SPIGCR1,
573 SPIGCR1_LOOPBACK_MASK);
574
575 return 0;
576}
577
/*
 * davinci_spi_check_error - translate SPIFLG error bits into an errno
 * @davinci_spi: driver state (provides the log device and IP version)
 * @int_status: snapshot of the SPIFLG register
 *
 * Returns 0 when no recognized error bit is set, otherwise the negative
 * errno for the first matching bit (checked in the order below).  The
 * DLEN/parity/overrun/TX/buf-init bits exist only on version 2 IP and
 * are skipped otherwise.
 */
static int davinci_spi_check_error(struct davinci_spi *davinci_spi,
		int int_status)
{
	struct device *sdev = davinci_spi->bitbang.master->dev.parent;

	if (int_status & SPIFLG_TIMEOUT_MASK) {
		dev_dbg(sdev, "SPI Time-out Error\n");
		return -ETIMEDOUT;
	}
	if (int_status & SPIFLG_DESYNC_MASK) {
		dev_dbg(sdev, "SPI Desynchronization Error\n");
		return -EIO;
	}
	if (int_status & SPIFLG_BITERR_MASK) {
		dev_dbg(sdev, "SPI Bit error\n");
		return -EIO;
	}

	if (davinci_spi->version == SPI_VERSION_2) {
		if (int_status & SPIFLG_DLEN_ERR_MASK) {
			dev_dbg(sdev, "SPI Data Length Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_PARERR_MASK) {
			dev_dbg(sdev, "SPI Parity Error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_OVRRUN_MASK) {
			dev_dbg(sdev, "SPI Data Overrun error\n");
			return -EIO;
		}
		if (int_status & SPIFLG_TX_INTR_MASK) {
			dev_dbg(sdev, "SPI TX intr bit set\n");
			return -EIO;
		}
		if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
			dev_dbg(sdev, "SPI Buffer Init Active\n");
			return -EBUSY;
		}
	}

	return 0;
}
621
/**
 * davinci_spi_bufs - functions which will handle transfer data
 * @spi: spi device on which data transfer to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function will put data to be transferred into data register
 * of SPI controller and then wait until the completion will be marked
 * by the IRQ Handler.
 *
 * Three paths: TX (with optional simultaneous RX), RX-only polled, and
 * RX-only interrupt-driven.  Returns t->len on success or a negative
 * errno from davinci_spi_check_error().
 */
static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int int_status, count, ret;
	u8 conv;
	u32 tx_data, data1_reg_val;
	u32 buf_val, flg_val;
	struct davinci_spi_platform_data *pdata;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;

	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;

	/*
	 * convert len to words based on bits_per_word
	 * NOTE(review): integer division silently drops a trailing odd byte
	 * when conv == 2 -- presumably callers always pass a multiple of the
	 * word size; confirm.
	 */
	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
	davinci_spi->count = t->len / conv;

	/* preserve the upper SPIDAT1 bits (CSHOLD/CSNR) set by chipselect() */
	data1_reg_val = ioread32(davinci_spi->base + SPIDAT1);

	INIT_COMPLETION(davinci_spi->done);

	ret = davinci_spi_bufs_prep(spi, davinci_spi);
	if (ret)
		return ret;

	/* Enable SPI */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	/* program C2T/T2C delays from platform data */
	iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
			(pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
			davinci_spi->base + SPIDELAY);

	count = davinci_spi->count;

	/* Determine the command to execute READ or WRITE */
	if (t->tx_buf) {
		clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);

		while (1) {
			tx_data = davinci_spi->get_tx(davinci_spi);

			/* replace only the low 16 data bits of SPIDAT1 */
			data1_reg_val &= ~(0xFFFF);
			data1_reg_val |= (0xFFFF & tx_data);

			/*
			 * NOTE(review): when TXFULL is set the word is not
			 * written (and count not decremented), yet get_tx()
			 * above has already advanced the tx pointer and the
			 * RX read below still runs -- verify the TX/RX
			 * pairing for the full-FIFO case.
			 */
			buf_val = ioread32(davinci_spi->base + SPIBUF);
			if ((buf_val & SPIBUF_TXFULL_MASK) == 0) {
				iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				count--;
			}
			while (ioread32(davinci_spi->base + SPIBUF)
					& SPIBUF_RXEMPTY_MASK)
				cpu_relax();

			/* getting the returned byte */
			if (t->rx_buf) {
				buf_val = ioread32(davinci_spi->base + SPIBUF);
				davinci_spi->get_rx(buf_val, davinci_spi);
			}
			if (count <= 0)
				break;
		}
	} else {
		if (pdata->poll_mode) {
			while (1) {
				/* keeps the serial clock going */
				if ((ioread32(davinci_spi->base + SPIBUF)
						& SPIBUF_TXFULL_MASK) == 0)
					iowrite32(data1_reg_val,
							davinci_spi->base + SPIDAT1);

				while (ioread32(davinci_spi->base + SPIBUF) &
						SPIBUF_RXEMPTY_MASK)
					cpu_relax();

				flg_val = ioread32(davinci_spi->base + SPIFLG);
				buf_val = ioread32(davinci_spi->base + SPIBUF);

				davinci_spi->get_rx(buf_val, davinci_spi);

				count--;
				if (count <= 0)
					break;
			}
		} else {	/* Receive in Interrupt mode */
			int i;

			for (i = 0; i < davinci_spi->count; i++) {
				/* arm RX/error interrupts, then clock one word out */
				set_io_bits(davinci_spi->base + SPIINT,
						SPIINT_BITERR_INTR
						| SPIINT_OVRRUN_INTR
						| SPIINT_RX_INTR);

				iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				/* spin until the IRQ handler re-masks RX_INTR */
				while (ioread32(davinci_spi->base + SPIINT) &
						SPIINT_RX_INTR)
					cpu_relax();
			}
			/* drop CSHOLD and the data bits when done */
			iowrite32((data1_reg_val & 0x0ffcffff),
					davinci_spi->base + SPIDAT1);
		}
	}

	/*
	 * Check for bit error, desync error,parity error,timeout error and
	 * receive overflow errors
	 */
	int_status = ioread32(davinci_spi->base + SPIFLG);

	ret = davinci_spi_check_error(davinci_spi, int_status);
	if (ret != 0)
		return ret;

	/* SPI Framework maintains the count only in bytes so convert back */
	davinci_spi->count *= conv;

	return t->len;
}
754
/*
 * DAVINCI_DMA_DATA_TYPE_S8/S16/S32 were redefined here with values
 * identical to the definitions near the top of this file.  The duplicate
 * (benign but redundant) redefinitions have been removed; the originals
 * remain in scope for davinci_spi_bufs_dma() below.
 */
758
/*
 * davinci_spi_bufs_dma - run one transfer through the EDMA engine
 * @spi: device to transfer with
 * @t: transfer descriptor (tx_buf/rx_buf/len)
 *
 * Maps the buffers, programs the TX (and optionally RX) EDMA channels
 * against the controller's SPIDAT1/SPIBUF registers, starts them, and
 * blocks on the completion(s) signalled from the EDMA callbacks.
 * Returns t->len on success or a negative errno.
 */
static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int int_status = 0;
	int count, temp_count;
	u8 conv = 1;
	u32 data1_reg_val;
	struct davinci_spi_dma *davinci_spi_dma;
	int word_len, data_type, ret;
	unsigned long tx_reg, rx_reg;
	struct davinci_spi_platform_data *pdata;
	struct device *sdev;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;
	sdev = davinci_spi->bitbang.master->dev.parent;

	davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

	/* EDMA needs the *physical* register addresses */
	tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
	rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;

	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;

	/* convert len to words based on bits_per_word */
	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
	davinci_spi->count = t->len / conv;

	/* preserve the upper SPIDAT1 bits (CSHOLD/CSNR) set by chipselect() */
	data1_reg_val = ioread32(davinci_spi->base + SPIDAT1);

	INIT_COMPLETION(davinci_spi->done);

	init_completion(&davinci_spi_dma->dma_rx_completion);
	init_completion(&davinci_spi_dma->dma_tx_completion);

	word_len = conv * 8;

	if (word_len <= 8)
		data_type = DAVINCI_DMA_DATA_TYPE_S8;
	else if (word_len <= 16)
		data_type = DAVINCI_DMA_DATA_TYPE_S16;
	else if (word_len <= 32)
		data_type = DAVINCI_DMA_DATA_TYPE_S32;
	else
		return -EINVAL;

	ret = davinci_spi_bufs_prep(spi, davinci_spi);
	if (ret)
		return ret;

	/* Put delay val if required */
	iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
			(pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
			davinci_spi->base + SPIDELAY);

	count = davinci_spi->count;	/* the number of elements */

	/* disable all interrupts for dma transfers */
	clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
	/* Disable SPI to write configuration bits in SPIDAT */
	clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
	/* Enable SPI */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	/*
	 * NOTE(review): dma_map_single() takes a size in *bytes* but is
	 * passed `count`, which is t->len / conv (words).  For 16-bit words
	 * this maps only half the buffer -- verify; t->len would appear to
	 * be the correct size.  Likewise the "%d bytes" messages report a
	 * word count.
	 */
	if (t->tx_buf) {
		t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
			dev_dbg(sdev, "Unable to DMA map a %d bytes"
				" TX buffer\n", count);
			return -ENOMEM;
		}
		temp_count = count;
	} else {
		/* We need TX clocking for RX transaction */
		t->tx_dma = dma_map_single(&spi->dev,
				(void *)davinci_spi->tmp_buf, count + 1,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
			dev_dbg(sdev, "Unable to DMA map a %d bytes"
				" TX tmp buffer\n", count);
			return -ENOMEM;
		}
		temp_count = count + 1;
	}

	/* TX channel: walk the source buffer, write each word to SPIDAT1 */
	edma_set_transfer_params(davinci_spi_dma->dma_tx_channel,
			data_type, temp_count, 1, 0, ASYNC);
	edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT);
	edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT);
	edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0);
	edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0);

	if (t->rx_buf) {
		/* initiate transaction */
		iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);

		t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count,
				DMA_FROM_DEVICE);
		if (dma_mapping_error(&spi->dev, t->rx_dma)) {
			dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
					count);
			/*
			 * NOTE(review): unmapping with a NULL device while the
			 * mapping was made with &spi->dev -- confirm this is
			 * intentional for this kernel's DMA API.
			 */
			if (t->tx_buf != NULL)
				dma_unmap_single(NULL, t->tx_dma,
						 count, DMA_TO_DEVICE);
			return -ENOMEM;
		}
		/* RX channel: read SPIBUF repeatedly into the dest buffer */
		edma_set_transfer_params(davinci_spi_dma->dma_rx_channel,
				data_type, count, 1, 0, ASYNC);
		edma_set_src(davinci_spi_dma->dma_rx_channel,
				rx_reg, INCR, W8BIT);
		edma_set_dest(davinci_spi_dma->dma_rx_channel,
				t->rx_dma, INCR, W8BIT);
		edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0);
		edma_set_dest_index(davinci_spi_dma->dma_rx_channel,
				data_type, 0);
	}

	if ((t->tx_buf) || (t->rx_buf))
		edma_start(davinci_spi_dma->dma_tx_channel);

	if (t->rx_buf)
		edma_start(davinci_spi_dma->dma_rx_channel);

	if ((t->rx_buf) || (t->tx_buf))
		davinci_spi_set_dma_req(spi, 1);

	if (t->tx_buf)
		wait_for_completion_interruptible(
				&davinci_spi_dma->dma_tx_completion);

	if (t->rx_buf)
		wait_for_completion_interruptible(
				&davinci_spi_dma->dma_rx_completion);

	dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE);

	if (t->rx_buf)
		dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE);

	/*
	 * Check for bit error, desync error,parity error,timeout error and
	 * receive overflow errors
	 */
	int_status = ioread32(davinci_spi->base + SPIFLG);

	ret = davinci_spi_check_error(davinci_spi, int_status);
	if (ret != 0)
		return ret;

	/* SPI Framework maintains the count only in bytes so convert back */
	davinci_spi->count *= conv;

	return t->len;
}
915
/**
 * davinci_spi_irq - IRQ handler for DaVinci SPI
 * @irq: IRQ number for this SPI Master
 * @context_data: structure for SPI Master controller davinci_spi
 *
 * Drains received words while SPIFLG reports a pending RX interrupt,
 * handing each word to the current get_rx() accessor.
 */
static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
{
	struct davinci_spi *davinci_spi = context_data;
	u32 int_status, rx_data = 0;
	irqreturn_t ret = IRQ_NONE;

	int_status = ioread32(davinci_spi->base + SPIFLG);

	while ((int_status & SPIFLG_RX_INTR_MASK)) {
		/*
		 * NOTE(review): this condition repeats the loop condition, so
		 * it is always true here and the error-handling else branch
		 * below is unreachable -- presumably it was meant to test
		 * error bits instead; confirm intended behavior.
		 */
		if (likely(int_status & SPIFLG_RX_INTR_MASK)) {
			ret = IRQ_HANDLED;

			rx_data = ioread32(davinci_spi->base + SPIBUF);
			davinci_spi->get_rx(rx_data, davinci_spi);

			/*
			 * Disable Receive Interrupt
			 * NOTE(review): this writes the complement value to
			 * SPIINT, which also *sets* every other bit -- a
			 * clear_io_bits() of RX/TX looks like the intent;
			 * verify against the interrupt-mode PIO path, which
			 * polls SPIINT for RX_INTR going low.
			 */
			iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR),
					davinci_spi->base + SPIINT);
		} else
			(void)davinci_spi_check_error(davinci_spi, int_status);

		int_status = ioread32(davinci_spi->base + SPIFLG);
	}

	return ret;
}
947
948/**
949 * davinci_spi_probe - probe function for SPI Master Controller
950 * @pdev: platform_device structure which contains plateform specific data
951 */
952static int davinci_spi_probe(struct platform_device *pdev)
953{
954 struct spi_master *master;
955 struct davinci_spi *davinci_spi;
956 struct davinci_spi_platform_data *pdata;
957 struct resource *r, *mem;
958 resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
959 resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
960 resource_size_t dma_eventq = SPI_NO_RESOURCE;
961 int i = 0, ret = 0;
962
963 pdata = pdev->dev.platform_data;
964 if (pdata == NULL) {
965 ret = -ENODEV;
966 goto err;
967 }
968
969 master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
970 if (master == NULL) {
971 ret = -ENOMEM;
972 goto err;
973 }
974
975 dev_set_drvdata(&pdev->dev, master);
976
977 davinci_spi = spi_master_get_devdata(master);
978 if (davinci_spi == NULL) {
979 ret = -ENOENT;
980 goto free_master;
981 }
982
983 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
984 if (r == NULL) {
985 ret = -ENOENT;
986 goto free_master;
987 }
988
989 davinci_spi->pbase = r->start;
990 davinci_spi->region_size = resource_size(r);
991 davinci_spi->pdata = pdata;
992
993 mem = request_mem_region(r->start, davinci_spi->region_size,
994 pdev->name);
995 if (mem == NULL) {
996 ret = -EBUSY;
997 goto free_master;
998 }
999
Sekhar Nori50356dd2010-10-08 15:27:26 +05301000 davinci_spi->base = ioremap(r->start, davinci_spi->region_size);
Sandeep Paulraj358934a2009-12-16 22:02:18 +00001001 if (davinci_spi->base == NULL) {
1002 ret = -ENOMEM;
1003 goto release_region;
1004 }
1005
1006 davinci_spi->irq = platform_get_irq(pdev, 0);
1007 if (davinci_spi->irq <= 0) {
1008 ret = -EINVAL;
1009 goto unmap_io;
1010 }
1011
1012 ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED,
1013 dev_name(&pdev->dev), davinci_spi);
1014 if (ret)
1015 goto unmap_io;
1016
1017 /* Allocate tmp_buf for tx_buf */
1018 davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL);
1019 if (davinci_spi->tmp_buf == NULL) {
1020 ret = -ENOMEM;
1021 goto irq_free;
1022 }
1023
1024 davinci_spi->bitbang.master = spi_master_get(master);
1025 if (davinci_spi->bitbang.master == NULL) {
1026 ret = -ENODEV;
1027 goto free_tmp_buf;
1028 }
1029
1030 davinci_spi->clk = clk_get(&pdev->dev, NULL);
1031 if (IS_ERR(davinci_spi->clk)) {
1032 ret = -ENODEV;
1033 goto put_master;
1034 }
1035 clk_enable(davinci_spi->clk);
1036
Sandeep Paulraj358934a2009-12-16 22:02:18 +00001037 master->bus_num = pdev->id;
1038 master->num_chipselect = pdata->num_chipselect;
1039 master->setup = davinci_spi_setup;
1040 master->cleanup = davinci_spi_cleanup;
1041
1042 davinci_spi->bitbang.chipselect = davinci_spi_chipselect;
1043 davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer;
1044
1045 davinci_spi->version = pdata->version;
1046 use_dma = pdata->use_dma;
1047
1048 davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
1049 if (davinci_spi->version == SPI_VERSION_2)
1050 davinci_spi->bitbang.flags |= SPI_READY;
1051
1052 if (use_dma) {
Brian Niebuhr778e2612010-09-03 15:15:06 +05301053 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1054 if (r)
1055 dma_rx_chan = r->start;
1056 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1057 if (r)
1058 dma_tx_chan = r->start;
1059 r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
1060 if (r)
1061 dma_eventq = r->start;
Sandeep Paulraj358934a2009-12-16 22:02:18 +00001062 }
1063
1064 if (!use_dma ||
1065 dma_rx_chan == SPI_NO_RESOURCE ||
1066 dma_tx_chan == SPI_NO_RESOURCE ||
1067 dma_eventq == SPI_NO_RESOURCE) {
1068 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio;
1069 use_dma = 0;
1070 } else {
1071 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma;
1072 davinci_spi->dma_channels = kzalloc(master->num_chipselect
1073 * sizeof(struct davinci_spi_dma), GFP_KERNEL);
1074 if (davinci_spi->dma_channels == NULL) {
1075 ret = -ENOMEM;
1076 goto free_clk;
1077 }
1078
1079 for (i = 0; i < master->num_chipselect; i++) {
1080 davinci_spi->dma_channels[i].dma_rx_channel = -1;
1081 davinci_spi->dma_channels[i].dma_rx_sync_dev =
1082 dma_rx_chan;
1083 davinci_spi->dma_channels[i].dma_tx_channel = -1;
1084 davinci_spi->dma_channels[i].dma_tx_sync_dev =
1085 dma_tx_chan;
1086 davinci_spi->dma_channels[i].eventq = dma_eventq;
1087 }
1088 dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n"
1089 "Using RX channel = %d , TX channel = %d and "
1090 "event queue = %d", dma_rx_chan, dma_tx_chan,
1091 dma_eventq);
1092 }
1093
1094 davinci_spi->get_rx = davinci_spi_rx_buf_u8;
1095 davinci_spi->get_tx = davinci_spi_tx_buf_u8;
1096
1097 init_completion(&davinci_spi->done);
1098
1099 /* Reset In/OUT SPI module */
1100 iowrite32(0, davinci_spi->base + SPIGCR0);
1101 udelay(100);
1102 iowrite32(1, davinci_spi->base + SPIGCR0);
1103
1104 /* Clock internal */
1105 if (davinci_spi->pdata->clk_internal)
1106 set_io_bits(davinci_spi->base + SPIGCR1,
1107 SPIGCR1_CLKMOD_MASK);
1108 else
1109 clear_io_bits(davinci_spi->base + SPIGCR1,
1110 SPIGCR1_CLKMOD_MASK);
1111
Brian Niebuhr843a7132010-08-12 12:49:05 +05301112 iowrite32(CS_DEFAULT, davinci_spi->base + SPIDEF);
1113
Sandeep Paulraj358934a2009-12-16 22:02:18 +00001114 /* master mode default */
1115 set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
1116
1117 if (davinci_spi->pdata->intr_level)
1118 iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL);
1119 else
1120 iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL);
1121
1122 ret = spi_bitbang_start(&davinci_spi->bitbang);
1123 if (ret)
1124 goto free_clk;
1125
Brian Niebuhr3b740b12010-09-03 14:50:07 +05301126 dev_info(&pdev->dev, "Controller at 0x%p\n", davinci_spi->base);
Sandeep Paulraj358934a2009-12-16 22:02:18 +00001127
1128 if (!pdata->poll_mode)
1129 dev_info(&pdev->dev, "Operating in interrupt mode"
1130 " using IRQ %d\n", davinci_spi->irq);
1131
1132 return ret;
1133
1134free_clk:
1135 clk_disable(davinci_spi->clk);
1136 clk_put(davinci_spi->clk);
1137put_master:
1138 spi_master_put(master);
1139free_tmp_buf:
1140 kfree(davinci_spi->tmp_buf);
1141irq_free:
1142 free_irq(davinci_spi->irq, davinci_spi);
1143unmap_io:
1144 iounmap(davinci_spi->base);
1145release_region:
1146 release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
1147free_master:
1148 kfree(master);
1149err:
1150 return ret;
1151}
1152
1153/**
1154 * davinci_spi_remove - remove function for SPI Master Controller
1155 * @pdev: platform_device structure which contains plateform specific data
1156 *
1157 * This function will do the reverse action of davinci_spi_probe function
1158 * It will free the IRQ and SPI controller's memory region.
1159 * It will also call spi_bitbang_stop to destroy the work queue which was
1160 * created by spi_bitbang_start.
1161 */
1162static int __exit davinci_spi_remove(struct platform_device *pdev)
1163{
1164 struct davinci_spi *davinci_spi;
1165 struct spi_master *master;
1166
1167 master = dev_get_drvdata(&pdev->dev);
1168 davinci_spi = spi_master_get_devdata(master);
1169
1170 spi_bitbang_stop(&davinci_spi->bitbang);
1171
1172 clk_disable(davinci_spi->clk);
1173 clk_put(davinci_spi->clk);
1174 spi_master_put(master);
1175 kfree(davinci_spi->tmp_buf);
1176 free_irq(davinci_spi->irq, davinci_spi);
1177 iounmap(davinci_spi->base);
1178 release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
1179
1180 return 0;
1181}
1182
1183static struct platform_driver davinci_spi_driver = {
1184 .driver.name = "spi_davinci",
1185 .remove = __exit_p(davinci_spi_remove),
1186};
1187
/*
 * Module entry point: register the driver and run its probe once.
 * platform_driver_probe() is used (rather than platform_driver_register())
 * so the probe code can be discarded after init.
 */
static int __init davinci_spi_init(void)
{
	return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe);
}
module_init(davinci_spi_init);
1193
/* Module exit point: unregister the driver, triggering davinci_spi_remove */
static void __exit davinci_spi_exit(void)
{
	platform_driver_unregister(&davinci_spi_driver);
}
module_exit(davinci_spi_exit);
1199
1200MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
1201MODULE_LICENSE("GPL");