/*
 * Driver for Cirrus Logic EP93xx SPI controller.
 *
 * Copyright (C) 2010-2011 Mika Westerberg
 *
 * Explicit FIFO handling code was inspired by amba-pl022 driver.
 *
 * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
 *
 * For more information about the SPI controller see documentation on Cirrus
 * Logic web site:
 *     http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>

#include <mach/dma.h>
#include <mach/ep93xx_spi.h>

#define SSPCR0			0x0000
#define SSPCR0_MODE_SHIFT	6
#define SSPCR0_SCR_SHIFT	8
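
/*
 * In SSPCR0 the data size select (DSS) value occupies the low bits, the SPI
 * mode bits (SPI_CPHA/SPI_CPOL) are shifted in starting at bit 6 and the
 * serial clock rate (SCR) divisor sits at bits 15:8 (see
 * ep93xx_spi_chip_setup() below); the EP93xx User's Guide linked above has
 * the authoritative register layout.
 */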

#define SSPCR1			0x0004
#define SSPCR1_RIE		BIT(0)
#define SSPCR1_TIE		BIT(1)
#define SSPCR1_RORIE		BIT(2)
#define SSPCR1_LBM		BIT(3)
#define SSPCR1_SSE		BIT(4)
#define SSPCR1_MS		BIT(5)
#define SSPCR1_SOD		BIT(6)

#define SSPDR			0x0008

#define SSPSR			0x000c
#define SSPSR_TFE		BIT(0)
#define SSPSR_TNF		BIT(1)
#define SSPSR_RNE		BIT(2)
#define SSPSR_RFF		BIT(3)
#define SSPSR_BSY		BIT(4)
#define SSPCPSR			0x0010

#define SSPIIR			0x0014
#define SSPIIR_RIS		BIT(0)
#define SSPIIR_TIS		BIT(1)
#define SSPIIR_RORIS		BIT(2)
#define SSPICR			SSPIIR

/* timeout in milliseconds */
#define SPI_TIMEOUT		5
/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE		8
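
/*
 * Note that a transfer of at most SPI_FIFO_SIZE bytes fits in the FIFO in
 * one go, which is why ep93xx_spi_process_transfer() below only sets up DMA
 * for transfers longer than this and uses PIO otherwise.
 */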

/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @lock: spinlock that protects concurrent accesses to fields @running,
 *        @current_msg and @msg_queue
 * @pdev: pointer to platform device
 * @clk: clock for the controller
 * @regs_base: pointer to ioremap()'d registers
 * @sspdr_phys: physical address of the SSPDR register
 * @irq: IRQ number used by the driver
 * @min_rate: minimum clock rate (in Hz) supported by the controller
 * @max_rate: maximum clock rate (in Hz) supported by the controller
 * @running: is the queue running
 * @wq: workqueue used by the driver
 * @msg_work: work that is queued for the driver
 * @wait: wait here until given transfer is completed
 * @msg_queue: queue for the messages
 * @current_msg: message that is currently processed (or %NULL if none)
 * @tx: current byte in transfer to transmit
 * @rx: current byte in transfer to receive
 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *              frame decreases this level and sending one frame increases it.
 * @dma_rx: RX DMA channel
 * @dma_tx: TX DMA channel
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers
 * @tx_sgt: sg table for TX transfers
 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
 *            the client
 *
 * This structure holds EP93xx SPI controller specific information. When
 * @running is %true, the driver accepts transfer requests from protocol
 * drivers. @current_msg holds a pointer to the message that is currently
 * processed. If @current_msg is %NULL, no processing is going on.
 *
 * Most of the fields are only written once and they can be accessed without
 * taking the @lock. Fields that are accessed concurrently are: @current_msg,
 * @running, and @msg_queue.
 */
struct ep93xx_spi {
	spinlock_t			lock;
	const struct platform_device	*pdev;
	struct clk			*clk;
	void __iomem			*regs_base;
	unsigned long			sspdr_phys;
	int				irq;
	unsigned long			min_rate;
	unsigned long			max_rate;
	bool				running;
	struct workqueue_struct		*wq;
	struct work_struct		msg_work;
	struct completion		wait;
	struct list_head		msg_queue;
	struct spi_message		*current_msg;
	size_t				tx;
	size_t				rx;
	size_t				fifo_level;
	struct dma_chan			*dma_rx;
	struct dma_chan			*dma_tx;
	struct ep93xx_dma_data		dma_rx_data;
	struct ep93xx_dma_data		dma_tx_data;
	struct sg_table			rx_sgt;
	struct sg_table			tx_sgt;
	void				*zeropage;
};

/**
 * struct ep93xx_spi_chip - SPI device hardware settings
 * @spi: back pointer to the SPI device
 * @rate: max rate in Hz this chip supports
 * @div_cpsr: cpsr (pre-scaler) divider
 * @div_scr: scr divider
 * @dss: bits per word (4 - 16 bits)
 * @ops: private chip operations
 *
 * This structure is used to store hardware register specific settings for each
 * SPI device. Settings are written to hardware by function
 * ep93xx_spi_chip_setup().
 */
struct ep93xx_spi_chip {
	const struct spi_device		*spi;
	unsigned long			rate;
	u8				div_cpsr;
	u8				div_scr;
	u8				dss;
	struct ep93xx_spi_chip_ops	*ops;
};
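
/*
 * Board code supplies the optional per-device callbacks in @ops through
 * spi_board_info.controller_data. An illustrative sketch (the GPIO number
 * and device names are hypothetical, not part of this driver):
 *
 *	static void mmc_cs_control(struct spi_device *spi, int value)
 *	{
 *		gpio_set_value(MY_BOARD_MMC_CS_GPIO, value);
 *	}
 *
 *	static struct ep93xx_spi_chip_ops mmc_spi_ops = {
 *		.cs_control	= mmc_cs_control,
 *	};
 *
 *	static struct spi_board_info my_board_spi_devices[] __initdata = {
 *		{
 *			.modalias		= "mmc_spi",
 *			.controller_data	= &mmc_spi_ops,
 *			.max_speed_hz		= 10000000,
 *			.bus_num		= 0,
 *			.chip_select		= 0,
 *		},
 *	};
 */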

/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw)	((bpw) - 1)
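/* e.g. 8 bits per word gives DSS = 7, 16 bits per word DSS = 15 */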

static inline void
ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value)
{
	__raw_writeb(value, espi->regs_base + reg);
}

static inline u8
ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
{
	return __raw_readb(spi->regs_base + reg);
}

static inline void
ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value)
{
	__raw_writew(value, espi->regs_base + reg);
}

static inline u16
ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
{
	return __raw_readw(spi->regs_base + reg);
}

static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
{
	u8 regval;
	int err;

	err = clk_enable(espi->clk);
	if (err)
		return err;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	return 0;
}

static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~SSPCR1_SSE;
	ep93xx_spi_write_u8(espi, SSPCR1, regval);

	clk_disable(espi->clk);
}

static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
{
	u8 regval;

	regval = ep93xx_spi_read_u8(espi, SSPCR1);
	regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
	ep93xx_spi_write_u8(espi, SSPCR1, regval);
}

/**
 * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
 * @espi: ep93xx SPI controller struct
 * @chip: divisors are calculated for this chip
 * @rate: desired SPI output clock rate
 *
 * Function calculates cpsr (clock pre-scaler) and scr divisors based on
 * given @rate and stores them in @chip->div_cpsr and @chip->div_scr. If,
 * for some reason, the divisors cannot be calculated nothing is stored and
 * %-EINVAL is returned.
 */
static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
				    struct ep93xx_spi_chip *chip,
				    unsigned long rate)
{
	unsigned long spi_clk_rate = clk_get_rate(espi->clk);
	int cpsr, scr;

	/*
	 * Make sure that the requested rate stays within the range supported
	 * by the controller. Note that the minimum is already checked in
	 * ep93xx_spi_transfer().
	 */
	rate = clamp(rate, espi->min_rate, espi->max_rate);

	/*
	 * Calculate divisors so that we get a speed according to the
	 * following formula:
	 *	rate = spi_clock_rate / (cpsr * (1 + scr))
	 *
	 * cpsr must be an even number between 2 and 254, scr can be any
	 * number between 0 and 255.
	 */
	for (cpsr = 2; cpsr <= 254; cpsr += 2) {
		for (scr = 0; scr <= 255; scr++) {
			if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
				chip->div_scr = (u8)scr;
				chip->div_cpsr = (u8)cpsr;
				return 0;
			}
		}
	}

	return -EINVAL;
}
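
/*
 * A worked example of the divisor search above, assuming a hypothetical
 * 14.7456 MHz SSP clock and a requested rate of 1 MHz: with cpsr = 2,
 * scr = 6 still gives 14745600 / (2 * 7) ~= 1.053 MHz which is above the
 * target, while scr = 7 gives 14745600 / (2 * 8) = 921.6 kHz, the first
 * rate not above the target, so div_cpsr = 2 and div_scr = 7 are stored.
 */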

static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
	int value = (spi->mode & SPI_CS_HIGH) ? control : !control;

	if (chip->ops && chip->ops->cs_control)
		chip->ops->cs_control(spi, value);
}

/**
 * ep93xx_spi_setup() - set up an SPI device
 * @spi: SPI device to set up
 *
 * This function sets up SPI device mode, speed etc. Can be called multiple
 * times for a single device. Returns %0 in case of success, negative error in
 * case of failure. When this function returns success, the device is
 * deselected.
 */
static int ep93xx_spi_setup(struct spi_device *spi)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct ep93xx_spi_chip *chip;

	if (spi->bits_per_word < 4 || spi->bits_per_word > 16) {
		dev_err(&espi->pdev->dev, "invalid bits per word %d\n",
			spi->bits_per_word);
		return -EINVAL;
	}

	chip = spi_get_ctldata(spi);
	if (!chip) {
		dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
			spi->modalias);

		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->spi = spi;
		chip->ops = spi->controller_data;

		if (chip->ops && chip->ops->setup) {
			int ret = chip->ops->setup(spi);
			if (ret) {
				kfree(chip);
				return ret;
			}
		}

		spi_set_ctldata(spi, chip);
	}

	if (spi->max_speed_hz != chip->rate) {
		int err;

		err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz);
		if (err != 0) {
			spi_set_ctldata(spi, NULL);
			kfree(chip);
			return err;
		}
		chip->rate = spi->max_speed_hz;
	}

	chip->dss = bits_per_word_to_dss(spi->bits_per_word);

	ep93xx_spi_cs_control(spi, false);
	return 0;
}

/**
 * ep93xx_spi_transfer() - queue message to be transferred
 * @spi: target SPI device
 * @msg: message to be transferred
 *
 * This function is called by SPI device drivers when they are going to
 * transfer a new message. It simply puts the message in the queue and
 * schedules the workqueue to perform the actual transfer later on.
 *
 * Returns %0 on success and negative error in case of failure.
 */
static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct spi_transfer *t;
	unsigned long flags;

	if (!msg || !msg->complete)
		return -EINVAL;

	/* first validate each transfer */
	list_for_each_entry(t, &msg->transfers, transfer_list) {
		if (t->bits_per_word) {
			if (t->bits_per_word < 4 || t->bits_per_word > 16)
				return -EINVAL;
		}
		if (t->speed_hz && t->speed_hz < espi->min_rate)
			return -EINVAL;
	}

	/*
	 * Now that we own the message, let's initialize it so that it is
	 * suitable for us. We use @msg->status to signal whether there was
	 * an error in the transfer and @msg->state is used to hold a pointer
	 * to the current transfer (or %NULL if no transfer is active).
	 */
	msg->state = NULL;
	msg->status = 0;
	msg->actual_length = 0;

	spin_lock_irqsave(&espi->lock, flags);
	if (!espi->running) {
		spin_unlock_irqrestore(&espi->lock, flags);
		return -ESHUTDOWN;
	}
	list_add_tail(&msg->queue, &espi->msg_queue);
	queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irqrestore(&espi->lock, flags);

	return 0;
}

/**
 * ep93xx_spi_cleanup() - cleans up master controller specific state
 * @spi: SPI device to cleanup
 *
 * This function releases master controller specific state for given @spi
 * device.
 */
static void ep93xx_spi_cleanup(struct spi_device *spi)
{
	struct ep93xx_spi_chip *chip;

	chip = spi_get_ctldata(spi);
	if (chip) {
		if (chip->ops && chip->ops->cleanup)
			chip->ops->cleanup(spi);
		spi_set_ctldata(spi, NULL);
		kfree(chip);
	}
}

/**
 * ep93xx_spi_chip_setup() - configures hardware according to given @chip
 * @espi: ep93xx SPI controller struct
 * @chip: chip specific settings
 *
 * This function sets up the actual hardware registers with settings given in
 * @chip. Note that no validation is done so make sure that callers validate
 * settings before calling this.
 */
static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
				  const struct ep93xx_spi_chip *chip)
{
	u16 cr0;

	cr0 = chip->div_scr << SSPCR0_SCR_SHIFT;
	cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
	cr0 |= chip->dss;

	dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
		chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss);
	dev_dbg(&espi->pdev->dev, "setup: cr0 %#x\n", cr0);

	ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr);
	ep93xx_spi_write_u16(espi, SSPCR0, cr0);
}
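
/*
 * For example, an 8-bit (dss = 7) SPI mode 3 device with div_scr = 7 ends
 * up with cr0 = (7 << 8) | (3 << 6) | 7 = 0x7c7.
 */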

static inline int bits_per_word(const struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	return t->bits_per_word ? t->bits_per_word : msg->spi->bits_per_word;
}

static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (bits_per_word(espi) > 8) {
		u16 tx_val = 0;

		/* @tx counts bytes, so convert it to a 16-bit frame index */
		if (t->tx_buf)
			tx_val = ((u16 *)t->tx_buf)[espi->tx / 2];
		ep93xx_spi_write_u16(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	} else {
		u8 tx_val = 0;

		if (t->tx_buf)
			tx_val = ((u8 *)t->tx_buf)[espi->tx];
		ep93xx_spi_write_u8(espi, SSPDR, tx_val);
		espi->tx += sizeof(tx_val);
	}
}

static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
{
	if (bits_per_word(espi) > 8) {
		u16 rx_val;

		rx_val = ep93xx_spi_read_u16(espi, SSPDR);
		/* @rx counts bytes, so convert it to a 16-bit frame index */
		if (t->rx_buf)
			((u16 *)t->rx_buf)[espi->rx / 2] = rx_val;
		espi->rx += sizeof(rx_val);
	} else {
		u8 rx_val;

		rx_val = ep93xx_spi_read_u8(espi, SSPDR);
		if (t->rx_buf)
			((u8 *)t->rx_buf)[espi->rx] = rx_val;
		espi->rx += sizeof(rx_val);
	}
}

/**
 * ep93xx_spi_read_write() - perform next RX/TX transfer
 * @espi: ep93xx SPI controller struct
 *
 * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
 * called several times, the whole transfer will be completed. Returns
 * %-EINPROGRESS when current transfer was not yet completed otherwise %0.
 *
 * When this function is finished, RX FIFO should be empty and TX FIFO should be
 * full.
 */
static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct spi_transfer *t = msg->state;

	/* read as long as RX FIFO has frames in it */
	while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
		ep93xx_do_read(espi, t);
		espi->fifo_level--;
	}

	/* write as long as TX FIFO has room */
	while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
		ep93xx_do_write(espi, t);
		espi->fifo_level++;
	}

	if (espi->rx == t->len)
		return 0;

	return -EINPROGRESS;
}

static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
{
	/*
	 * Now everything is set up for the current transfer. We prime the TX
	 * FIFO, enable interrupts, and wait for the transfer to complete.
	 */
	if (ep93xx_spi_read_write(espi)) {
		ep93xx_spi_enable_interrupts(espi);
		wait_for_completion(&espi->wait);
	}
}
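
/*
 * If the transfer did not fit in the FIFO on the first pass above, it
 * proceeds interrupt driven: ep93xx_spi_interrupt() below keeps calling
 * ep93xx_spi_read_write() to drain the RX FIFO and refill the TX FIFO, and
 * completes &espi->wait once the whole transfer has been received.
 */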

/**
 * ep93xx_spi_dma_prepare() - prepares a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function configures the DMA, maps the buffer and prepares the DMA
 * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
 * in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
{
	struct spi_transfer *t = espi->current_msg->state;
	struct dma_async_tx_descriptor *txd;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config conf;
	enum dma_transfer_direction slave_dirn;
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct dma_chan *chan;
	const void *buf, *pbuf;
	size_t len = t->len;
	int i, ret, nents;

	if (bits_per_word(espi) > 8)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;

	memset(&conf, 0, sizeof(conf));
	conf.direction = dir;

	if (dir == DMA_FROM_DEVICE) {
		chan = espi->dma_rx;
		buf = t->rx_buf;
		sgt = &espi->rx_sgt;

		conf.src_addr = espi->sspdr_phys;
		conf.src_addr_width = buswidth;
		slave_dirn = DMA_DEV_TO_MEM;
	} else {
		chan = espi->dma_tx;
		buf = t->tx_buf;
		sgt = &espi->tx_sgt;

		conf.dst_addr = espi->sspdr_phys;
		conf.dst_addr_width = buswidth;
		slave_dirn = DMA_MEM_TO_DEV;
	}

	ret = dmaengine_slave_config(chan, &conf);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * We need to split the transfer into PAGE_SIZE'd chunks. This is
	 * because we are using @espi->zeropage to provide a zero RX buffer
	 * for the TX transfers and we have only allocated one page for that.
	 *
	 * For performance reasons we allocate a new sg_table only when
	 * needed. Otherwise we will re-use the current one. Eventually the
	 * last sg_table is released in ep93xx_spi_release_dma().
	 */

	nents = DIV_ROUND_UP(len, PAGE_SIZE);
	if (nents != sgt->nents) {
		sg_free_table(sgt);

		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
		if (ret)
			return ERR_PTR(ret);
	}

	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, PAGE_SIZE);

		if (buf) {
			sg_set_page(sg, virt_to_page(pbuf), bytes,
				    offset_in_page(pbuf));
		} else {
			sg_set_page(sg, virt_to_page(espi->zeropage),
				    bytes, 0);
		}

		pbuf += bytes;
		len -= bytes;
	}

	if (WARN_ON(len)) {
		dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
		return ERR_PTR(-EINVAL);
	}

	nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return ERR_PTR(-ENOMEM);

	txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
						 slave_dirn, DMA_CTRL_ACK);
	if (!txd) {
		dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
		return ERR_PTR(-ENOMEM);
	}
	return txd;
}
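
/*
 * As an example of the chunking above: with 4 KiB pages, a 10000-byte
 * transfer maps to three scatterlist entries of 4096 + 4096 + 1808 bytes,
 * so even a TX-only transfer never references more than one page of
 * @espi->zeropage per entry.
 */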

/**
 * ep93xx_spi_dma_finish() - finishes with a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function finishes with the DMA transfer. After this, the DMA buffer is
 * unmapped.
 */
static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
				  enum dma_data_direction dir)
{
	struct dma_chan *chan;
	struct sg_table *sgt;

	if (dir == DMA_FROM_DEVICE) {
		chan = espi->dma_rx;
		sgt = &espi->rx_sgt;
	} else {
		chan = espi->dma_tx;
		sgt = &espi->tx_sgt;
	}

	dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
}

static void ep93xx_spi_dma_callback(void *callback_param)
{
	complete(callback_param);
}

static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
{
	struct spi_message *msg = espi->current_msg;
	struct dma_async_tx_descriptor *rxd, *txd;

	rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE);
	if (IS_ERR(rxd)) {
		dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
		msg->status = PTR_ERR(rxd);
		return;
	}

	txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE);
	if (IS_ERR(txd)) {
		ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
		dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
		msg->status = PTR_ERR(txd);
		return;
	}

	/* We are ready when RX is done */
	rxd->callback = ep93xx_spi_dma_callback;
	rxd->callback_param = &espi->wait;

	/* Now submit both descriptors and wait while they finish */
	dmaengine_submit(rxd);
	dmaengine_submit(txd);

	dma_async_issue_pending(espi->dma_rx);
	dma_async_issue_pending(espi->dma_tx);

	wait_for_completion(&espi->wait);

	ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE);
	ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
}
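
/*
 * Completion is tied to the RX descriptor above because in a full-duplex
 * SPI transfer the last frame is received only after it has been clocked
 * out, so the RX channel is always the last one to finish.
 */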

/**
 * ep93xx_spi_process_transfer() - processes one SPI transfer
 * @espi: ep93xx SPI controller struct
 * @msg: current message
 * @t: transfer to process
 *
 * This function processes one SPI transfer given in @t. Function waits until
 * transfer is complete (may sleep) and updates @msg->status based on whether
 * transfer was successfully processed or not.
 */
static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
					struct spi_message *msg,
					struct spi_transfer *t)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);

	msg->state = t;

	/*
	 * Handle any transfer specific settings if needed. We use
	 * temporary chip settings here and restore original later when
	 * the transfer is finished.
	 */
	if (t->speed_hz || t->bits_per_word) {
		struct ep93xx_spi_chip tmp_chip = *chip;

		if (t->speed_hz) {
			int err;

			err = ep93xx_spi_calc_divisors(espi, &tmp_chip,
						       t->speed_hz);
			if (err) {
				dev_err(&espi->pdev->dev,
					"failed to adjust speed\n");
				msg->status = err;
				return;
			}
		}

		if (t->bits_per_word)
			tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word);

		/*
		 * Set up temporary new hw settings for this transfer.
		 */
		ep93xx_spi_chip_setup(espi, &tmp_chip);
	}

	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point in setting up DMA for transfers which will fit
	 * into the FIFO and can be transferred with a single interrupt. So
	 * in these cases we use PIO and don't bother with DMA.
	 */
	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
		ep93xx_spi_dma_transfer(espi);
	else
		ep93xx_spi_pio_transfer(espi);

	/*
	 * In case of error during transmit, we bail out from processing
	 * the message.
	 */
	if (msg->status)
		return;

	msg->actual_length += t->len;

	/*
	 * After this transfer is finished, perform any possible
	 * post-transfer actions requested by the protocol driver.
	 */
	if (t->delay_usecs) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
	}
	if (t->cs_change) {
		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
			/*
			 * In case the protocol driver is asking us to drop
			 * the chipselect briefly, we let the scheduler handle
			 * any "delay" here.
			 */
			ep93xx_spi_cs_control(msg->spi, false);
			cond_resched();
			ep93xx_spi_cs_control(msg->spi, true);
		}
	}

	if (t->speed_hz || t->bits_per_word)
		ep93xx_spi_chip_setup(espi, chip);
}

/**
 * ep93xx_spi_process_message() - process one SPI message
 * @espi: ep93xx SPI controller struct
 * @msg: message to process
 *
 * This function processes a single SPI message. We go through all transfers in
 * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
 * asserted during the whole message (unless per transfer cs_change is set).
 *
 * @msg->status contains %0 in case of success or negative error code in case
 * of failure.
 */
static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
				       struct spi_message *msg)
{
	unsigned long timeout;
	struct spi_transfer *t;
	int err;

	/*
	 * Enable the SPI controller and its clock.
	 */
	err = ep93xx_spi_enable(espi);
	if (err) {
		dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
		msg->status = err;
		return;
	}

	/*
	 * Just to be sure: flush any data from RX FIFO.
	 */
	timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
	while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
		if (time_after(jiffies, timeout)) {
			dev_warn(&espi->pdev->dev,
				 "timeout while flushing RX FIFO\n");
			msg->status = -ETIMEDOUT;
			return;
		}
		ep93xx_spi_read_u16(espi, SSPDR);
	}

	/*
	 * We explicitly handle FIFO level. This way we don't have to check TX
	 * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
	 */
	espi->fifo_level = 0;

	/*
	 * Update SPI controller registers according to spi device and assert
	 * the chipselect.
	 */
	ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi));
	ep93xx_spi_cs_control(msg->spi, true);

	list_for_each_entry(t, &msg->transfers, transfer_list) {
		ep93xx_spi_process_transfer(espi, msg, t);
		if (msg->status)
			break;
	}

	/*
	 * Now the whole message is transferred (or failed for some reason). We
	 * deselect the device and disable the SPI controller.
	 */
	ep93xx_spi_cs_control(msg->spi, false);
	ep93xx_spi_disable(espi);
}

#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work))

/**
 * ep93xx_spi_work() - EP93xx SPI workqueue worker function
 * @work: work struct
 *
 * Workqueue worker function. This function is called when there are new
 * SPI messages to be processed. Message is taken out from the queue and then
 * passed to ep93xx_spi_process_message().
 *
 * After message is transferred, protocol driver is notified by calling
 * @msg->complete(). In case of error, @msg->status is set to negative error
 * number, otherwise it contains zero (and @msg->actual_length is updated).
 */
static void ep93xx_spi_work(struct work_struct *work)
{
	struct ep93xx_spi *espi = work_to_espi(work);
	struct spi_message *msg;

	spin_lock_irq(&espi->lock);
	if (!espi->running || espi->current_msg ||
	    list_empty(&espi->msg_queue)) {
		spin_unlock_irq(&espi->lock);
		return;
	}
	msg = list_first_entry(&espi->msg_queue, struct spi_message, queue);
	list_del_init(&msg->queue);
	espi->current_msg = msg;
	spin_unlock_irq(&espi->lock);

	ep93xx_spi_process_message(espi, msg);

	/*
	 * Update the current message and re-schedule ourselves if there are
	 * more messages in the queue.
	 */
	spin_lock_irq(&espi->lock);
	espi->current_msg = NULL;
	if (espi->running && !list_empty(&espi->msg_queue))
		queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irq(&espi->lock);

	/* notify the protocol driver that we are done with this message */
	msg->complete(msg->context);
}

static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
	struct ep93xx_spi *espi = dev_id;
	u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);

	/*
	 * If we got ROR (receive overrun) interrupt we know that something is
	 * wrong. Just abort the message.
	 */
	if (unlikely(irq_status & SSPIIR_RORIS)) {
		/* clear the overrun interrupt */
		ep93xx_spi_write_u8(espi, SSPICR, 0);
		dev_warn(&espi->pdev->dev,
			 "receive overrun, aborting the message\n");
		espi->current_msg->status = -EIO;
	} else {
		/*
		 * Interrupt is either RX (RIS) or TX (TIS). For both cases we
		 * simply execute next data transfer.
		 */
		if (ep93xx_spi_read_write(espi)) {
			/*
			 * In normal case, there still is some processing left
			 * for current transfer. Let's wait for the next
			 * interrupt then.
			 */
			return IRQ_HANDLED;
		}
	}

	/*
	 * Current transfer is finished, either with error or with success. In
	 * any case we disable interrupts and notify the worker to handle
	 * any post-processing of the message.
	 */
	ep93xx_spi_disable_interrupts(espi);
	complete(&espi->wait);
	return IRQ_HANDLED;
}
961
Mika Westerberg626a96d2011-05-29 13:10:06 +0300962static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
963{
964 if (ep93xx_dma_chan_is_m2p(chan))
965 return false;
966
967 chan->private = filter_param;
968 return true;
969}
970
971static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
972{
973 dma_cap_mask_t mask;
974 int ret;
975
976 espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
977 if (!espi->zeropage)
978 return -ENOMEM;
979
980 dma_cap_zero(mask);
981 dma_cap_set(DMA_SLAVE, mask);
982
983 espi->dma_rx_data.port = EP93XX_DMA_SSP;
Vinod Koula485df42011-10-14 10:47:38 +0530984 espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
Mika Westerberg626a96d2011-05-29 13:10:06 +0300985 espi->dma_rx_data.name = "ep93xx-spi-rx";
986
987 espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
988 &espi->dma_rx_data);
989 if (!espi->dma_rx) {
990 ret = -ENODEV;
991 goto fail_free_page;
992 }
993
994 espi->dma_tx_data.port = EP93XX_DMA_SSP;
Vinod Koula485df42011-10-14 10:47:38 +0530995 espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
Mika Westerberg626a96d2011-05-29 13:10:06 +0300996 espi->dma_tx_data.name = "ep93xx-spi-tx";
997
998 espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
999 &espi->dma_tx_data);
1000 if (!espi->dma_tx) {
1001 ret = -ENODEV;
1002 goto fail_release_rx;
1003 }
1004
1005 return 0;
1006
1007fail_release_rx:
1008 dma_release_channel(espi->dma_rx);
1009 espi->dma_rx = NULL;
1010fail_free_page:
1011 free_page((unsigned long)espi->zeropage);
1012
1013 return ret;
1014}
1015
1016static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
1017{
1018 if (espi->dma_rx) {
1019 dma_release_channel(espi->dma_rx);
1020 sg_free_table(&espi->rx_sgt);
1021 }
1022 if (espi->dma_tx) {
1023 dma_release_channel(espi->dma_tx);
1024 sg_free_table(&espi->tx_sgt);
1025 }
1026
1027 if (espi->zeropage)
1028 free_page((unsigned long)espi->zeropage);
1029}
1030
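/*
 * The controller is instantiated from board support code as a platform
 * device named "ep93xx-spi", with struct ep93xx_spi_info as platform data.
 * A minimal, illustrative sketch (the values are hypothetical, not taken
 * from any real board):
 *
 *	static struct ep93xx_spi_info my_board_spi_info = {
 *		.num_chipselect	= 1,
 *		.use_dma	= true,
 *	};
 */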
static int __init ep93xx_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct ep93xx_spi_info *info;
	struct ep93xx_spi *espi;
	struct resource *res;
	int error;

	info = pdev->dev.platform_data;

	master = spi_alloc_master(&pdev->dev, sizeof(*espi));
	if (!master) {
		dev_err(&pdev->dev, "failed to allocate spi master\n");
		return -ENOMEM;
	}

	master->setup = ep93xx_spi_setup;
	master->transfer = ep93xx_spi_transfer;
	master->cleanup = ep93xx_spi_cleanup;
	master->bus_num = pdev->id;
	master->num_chipselect = info->num_chipselect;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	platform_set_drvdata(pdev, master);

	espi = spi_master_get_devdata(master);

	espi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(espi->clk)) {
		dev_err(&pdev->dev, "unable to get spi clock\n");
		error = PTR_ERR(espi->clk);
		goto fail_release_master;
	}

	spin_lock_init(&espi->lock);
	init_completion(&espi->wait);

	/*
	 * Calculate maximum and minimum supported clock rates for the
	 * controller. Following from rate = clk / (cpsr * (1 + scr)), the
	 * fastest setting is cpsr = 2, scr = 0 and the slowest is
	 * cpsr = 254, scr = 255.
	 */
	espi->max_rate = clk_get_rate(espi->clk) / 2;
	espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
	espi->pdev = pdev;

	espi->irq = platform_get_irq(pdev, 0);
	if (espi->irq < 0) {
		error = -EBUSY;
		dev_err(&pdev->dev, "failed to get irq resources\n");
		goto fail_put_clock;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "unable to get iomem resource\n");
		error = -ENODEV;
		goto fail_put_clock;
	}

	res = request_mem_region(res->start, resource_size(res), pdev->name);
	if (!res) {
		dev_err(&pdev->dev, "unable to request iomem resources\n");
		error = -EBUSY;
		goto fail_put_clock;
	}

	espi->sspdr_phys = res->start + SSPDR;
	espi->regs_base = ioremap(res->start, resource_size(res));
	if (!espi->regs_base) {
		dev_err(&pdev->dev, "failed to map resources\n");
		error = -ENODEV;
		goto fail_free_mem;
	}

	error = request_irq(espi->irq, ep93xx_spi_interrupt, 0,
			    "ep93xx-spi", espi);
	if (error) {
		dev_err(&pdev->dev, "failed to request irq\n");
		goto fail_unmap_regs;
	}

	if (info->use_dma && ep93xx_spi_setup_dma(espi))
		dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");

	espi->wq = create_singlethread_workqueue("ep93xx_spid");
	if (!espi->wq) {
		dev_err(&pdev->dev, "unable to create workqueue\n");
		error = -ENOMEM;
		goto fail_free_dma;
	}
	INIT_WORK(&espi->msg_work, ep93xx_spi_work);
	INIT_LIST_HEAD(&espi->msg_queue);
	espi->running = true;

	/* make sure that the hardware is disabled */
	ep93xx_spi_write_u8(espi, SSPCR1, 0);

	error = spi_register_master(master);
	if (error) {
		dev_err(&pdev->dev, "failed to register SPI master\n");
		goto fail_free_queue;
	}

	dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
		 (unsigned long)res->start, espi->irq);

	return 0;

fail_free_queue:
	destroy_workqueue(espi->wq);
fail_free_dma:
	ep93xx_spi_release_dma(espi);
	free_irq(espi->irq, espi);
fail_unmap_regs:
	iounmap(espi->regs_base);
fail_free_mem:
	release_mem_region(res->start, resource_size(res));
fail_put_clock:
	clk_put(espi->clk);
fail_release_master:
	spi_master_put(master);
	platform_set_drvdata(pdev, NULL);

	return error;
}

static int __exit ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct resource *res;

	spin_lock_irq(&espi->lock);
	espi->running = false;
	spin_unlock_irq(&espi->lock);

	destroy_workqueue(espi->wq);

	/*
	 * Complete remaining messages with %-ESHUTDOWN status.
	 */
	spin_lock_irq(&espi->lock);
	while (!list_empty(&espi->msg_queue)) {
		struct spi_message *msg;

		msg = list_first_entry(&espi->msg_queue,
				       struct spi_message, queue);
		list_del_init(&msg->queue);
		msg->status = -ESHUTDOWN;
		spin_unlock_irq(&espi->lock);
		msg->complete(msg->context);
		spin_lock_irq(&espi->lock);
	}
	spin_unlock_irq(&espi->lock);

	ep93xx_spi_release_dma(espi);
	free_irq(espi->irq, espi);
	iounmap(espi->regs_base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	clk_put(espi->clk);
	platform_set_drvdata(pdev, NULL);

	spi_unregister_master(master);
	return 0;
}

static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
		.owner	= THIS_MODULE,
	},
	.remove		= __exit_p(ep93xx_spi_remove),
};

static int __init ep93xx_spi_init(void)
{
	return platform_driver_probe(&ep93xx_spi_driver, ep93xx_spi_probe);
}
module_init(ep93xx_spi_init);

static void __exit ep93xx_spi_exit(void)
{
	platform_driver_unregister(&ep93xx_spi_driver);
}
module_exit(ep93xx_spi_exit);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");