1/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13/*
14 * SPI driver for Qualcomm MSM platforms
15 *
16 */
17#include <linux/version.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/spinlock.h>
21#include <linux/list.h>
22#include <linux/irq.h>
23#include <linux/platform_device.h>
24#include <linux/spi/spi.h>
25#include <linux/interrupt.h>
26#include <linux/err.h>
27#include <linux/clk.h>
28#include <linux/delay.h>
29#include <linux/workqueue.h>
30#include <linux/io.h>
31#include <linux/debugfs.h>
32#include <mach/msm_spi.h>
33#include <linux/dma-mapping.h>
34#include <linux/sched.h>
35#include <mach/dma.h>
36#include <asm/atomic.h>
37#include <linux/mutex.h>
38#include <linux/gpio.h>
39#include <linux/remote_spinlock.h>
40#include <linux/pm_qos_params.h>
41
42#define SPI_DRV_NAME "spi_qsd"
43#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
44
45#define QSD_REG(x) (x)
46#define QUP_REG(x)
47
48#define SPI_FIFO_WORD_CNT 0x0048
49
50#elif defined(CONFIG_SPI_QUP) || defined(CONFIG_SPI_QUP_MODULE)
51
52#define QSD_REG(x)
53#define QUP_REG(x) (x)
54
55#define QUP_CONFIG 0x0000 /* N & NO_INPUT/NO_OUTPUT bits */
56#define QUP_ERROR_FLAGS 0x0308
57#define QUP_ERROR_FLAGS_EN 0x030C
58#define QUP_ERR_MASK 0x3
59#define SPI_OUTPUT_FIFO_WORD_CNT 0x010C
60#define SPI_INPUT_FIFO_WORD_CNT 0x0214
61#define QUP_MX_WRITE_COUNT 0x0150
62#define QUP_MX_WRITE_CNT_CURRENT 0x0154
63
64#define QUP_CONFIG_SPI_MODE 0x0100
65
66#define GSBI_CTRL_REG 0x0
67#define GSBI_SPI_CONFIG 0x30
68#endif
69
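/*
 * Note on the QSD_REG()/QUP_REG() helpers used in the register map below:
 * depending on the build configuration exactly one of them expands to its
 * argument and the other to nothing, so each register macro resolves to a
 * single offset. For example, with CONFIG_SPI_QUP set:
 *     SPI_CONFIG -> QSD_REG(0x0000) QUP_REG(0x0300) -> 0x0300
 */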
70#define SPI_CONFIG QSD_REG(0x0000) QUP_REG(0x0300)
71#define SPI_IO_CONTROL QSD_REG(0x0004) QUP_REG(0x0304)
72#define SPI_IO_MODES QSD_REG(0x0008) QUP_REG(0x0008)
73#define SPI_SW_RESET QSD_REG(0x000C) QUP_REG(0x000C)
74#define SPI_TIME_OUT QSD_REG(0x0010) QUP_REG(0x0010)
75#define SPI_TIME_OUT_CURRENT QSD_REG(0x0014) QUP_REG(0x0014)
76#define SPI_MX_OUTPUT_COUNT QSD_REG(0x0018) QUP_REG(0x0100)
77#define SPI_MX_OUTPUT_CNT_CURRENT QSD_REG(0x001C) QUP_REG(0x0104)
78#define SPI_MX_INPUT_COUNT QSD_REG(0x0020) QUP_REG(0x0200)
79#define SPI_MX_INPUT_CNT_CURRENT QSD_REG(0x0024) QUP_REG(0x0204)
80#define SPI_MX_READ_COUNT QSD_REG(0x0028) QUP_REG(0x0208)
81#define SPI_MX_READ_CNT_CURRENT QSD_REG(0x002C) QUP_REG(0x020C)
82#define SPI_OPERATIONAL QSD_REG(0x0030) QUP_REG(0x0018)
83#define SPI_ERROR_FLAGS QSD_REG(0x0034) QUP_REG(0x001C)
84#define SPI_ERROR_FLAGS_EN QSD_REG(0x0038) QUP_REG(0x0020)
85#define SPI_DEASSERT_WAIT QSD_REG(0x003C) QUP_REG(0x0310)
86#define SPI_OUTPUT_DEBUG QSD_REG(0x0040) QUP_REG(0x0108)
87#define SPI_INPUT_DEBUG QSD_REG(0x0044) QUP_REG(0x0210)
88#define SPI_TEST_CTRL QSD_REG(0x004C) QUP_REG(0x0024)
89#define SPI_OUTPUT_FIFO QSD_REG(0x0100) QUP_REG(0x0110)
90#define SPI_INPUT_FIFO QSD_REG(0x0200) QUP_REG(0x0218)
91#define SPI_STATE QSD_REG(SPI_OPERATIONAL) QUP_REG(0x0004)
92
93/* SPI_CONFIG fields */
94#define SPI_CFG_INPUT_FIRST 0x00000200
95#define SPI_NO_INPUT 0x00000080
96#define SPI_NO_OUTPUT 0x00000040
97#define SPI_CFG_LOOPBACK 0x00000100
98#define SPI_CFG_N 0x0000001F
99
100/* SPI_IO_CONTROL fields */
101#define SPI_IO_C_CLK_IDLE_HIGH 0x00000400
102#define SPI_IO_C_MX_CS_MODE 0x00000100
103#define SPI_IO_C_CS_N_POLARITY 0x000000F0
104#define SPI_IO_C_CS_N_POLARITY_0 0x00000010
105#define SPI_IO_C_CS_SELECT 0x0000000C
106#define SPI_IO_C_TRISTATE_CS 0x00000002
107#define SPI_IO_C_NO_TRI_STATE 0x00000001
108
109/* SPI_IO_MODES fields */
110#define SPI_IO_M_OUTPUT_BIT_SHIFT_EN QSD_REG(0x00004000) QUP_REG(0x00010000)
111#define SPI_IO_M_PACK_EN QSD_REG(0x00002000) QUP_REG(0x00008000)
112#define SPI_IO_M_UNPACK_EN QSD_REG(0x00001000) QUP_REG(0x00004000)
113#define SPI_IO_M_INPUT_MODE QSD_REG(0x00000C00) QUP_REG(0x00003000)
114#define SPI_IO_M_OUTPUT_MODE QSD_REG(0x00000300) QUP_REG(0x00000C00)
115#define SPI_IO_M_INPUT_FIFO_SIZE QSD_REG(0x000000C0) QUP_REG(0x00000380)
116#define SPI_IO_M_INPUT_BLOCK_SIZE QSD_REG(0x00000030) QUP_REG(0x00000060)
117#define SPI_IO_M_OUTPUT_FIFO_SIZE QSD_REG(0x0000000C) QUP_REG(0x0000001C)
118#define SPI_IO_M_OUTPUT_BLOCK_SIZE QSD_REG(0x00000003) QUP_REG(0x00000003)
119
120#define INPUT_BLOCK_SZ_SHIFT QSD_REG(4) QUP_REG(5)
121#define INPUT_FIFO_SZ_SHIFT QSD_REG(6) QUP_REG(7)
122#define OUTPUT_BLOCK_SZ_SHIFT QSD_REG(0) QUP_REG(0)
123#define OUTPUT_FIFO_SZ_SHIFT QSD_REG(2) QUP_REG(2)
124#define OUTPUT_MODE_SHIFT QSD_REG(8) QUP_REG(10)
125#define INPUT_MODE_SHIFT QSD_REG(10) QUP_REG(12)
126
127/* SPI_OPERATIONAL fields */
128#define SPI_OP_MAX_INPUT_DONE_FLAG 0x00000800
129#define SPI_OP_MAX_OUTPUT_DONE_FLAG 0x00000400
130#define SPI_OP_INPUT_SERVICE_FLAG 0x00000200
131#define SPI_OP_OUTPUT_SERVICE_FLAG 0x00000100
132#define SPI_OP_INPUT_FIFO_FULL 0x00000080
133#define SPI_OP_OUTPUT_FIFO_FULL 0x00000040
134#define SPI_OP_IP_FIFO_NOT_EMPTY 0x00000020
135#define SPI_OP_OP_FIFO_NOT_EMPTY 0x00000010
136#define SPI_OP_STATE_VALID 0x00000004
137#define SPI_OP_STATE 0x00000003
138
139#define SPI_OP_STATE_CLEAR_BITS 0x2
140enum msm_spi_state {
141 SPI_OP_STATE_RESET = 0x00000000,
142 SPI_OP_STATE_RUN = 0x00000001,
143 SPI_OP_STATE_PAUSE = 0x00000003,
144};
145
146/* SPI_ERROR_FLAGS fields */
147#define SPI_ERR_OUTPUT_OVER_RUN_ERR 0x00000020
148#define SPI_ERR_INPUT_UNDER_RUN_ERR 0x00000010
149#define SPI_ERR_OUTPUT_UNDER_RUN_ERR 0x00000008
150#define SPI_ERR_INPUT_OVER_RUN_ERR 0x00000004
151#define SPI_ERR_CLK_OVER_RUN_ERR 0x00000002
152#define SPI_ERR_CLK_UNDER_RUN_ERR 0x00000001
153
154/* We don't allow transactions larger than 4K-64 or 64K-64 due to
155 mx_input/output_cnt register size */
156#define SPI_MAX_TRANSFERS QSD_REG(0xFC0) QUP_REG(0xFC0)
157#define SPI_MAX_LEN (SPI_MAX_TRANSFERS * dd->bytes_per_word)
158
159#define SPI_NUM_CHIPSELECTS 4
160#define SPI_SUPPORTED_MODES (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP)
161
162#define SPI_DELAY_THRESHOLD 1
163/* Default timeout is 10 milliseconds */
164#define SPI_DEFAULT_TIMEOUT 10
165/* 250 microseconds */
166#define SPI_TRYLOCK_DELAY 250
167
168/* Data Mover burst size */
169#define DM_BURST_SIZE 16
170/* Data Mover commands should be aligned to 64 bits (8 bytes) */
171#define DM_BYTE_ALIGN 8
172
173static char const * const spi_rsrcs[] = {
174 "spi_clk",
175 "spi_cs",
176 "spi_miso",
177 "spi_mosi"
178};
179
180enum msm_spi_mode {
181 SPI_FIFO_MODE = 0x0, /* 00 */
182 SPI_BLOCK_MODE = 0x1, /* 01 */
183 SPI_DMOV_MODE = 0x2, /* 10 */
184 SPI_MODE_NONE = 0xFF, /* invalid value */
185};
186
187/* Structures for Data Mover */
188struct spi_dmov_cmd {
189 dmov_box box; /* data aligned to max(dm_burst_size, block_size)
190 (<= fifo_size) */
191 dmov_s single_pad; /* data unaligned to max(dm_burst_size, block_size)
192 padded to fit */
193 dma_addr_t cmd_ptr;
194};
195
196MODULE_LICENSE("GPL v2");
197MODULE_VERSION("0.3");
198MODULE_ALIAS("platform:"SPI_DRV_NAME);
199
200static struct pm_qos_request_list qos_req_list;
201
202#ifdef CONFIG_DEBUG_FS
203/* Used to create debugfs entries */
204static const struct {
205 const char *name;
206 mode_t mode;
207 int offset;
208} debugfs_spi_regs[] = {
209 {"config", S_IRUGO | S_IWUSR, SPI_CONFIG},
210 {"io_control", S_IRUGO | S_IWUSR, SPI_IO_CONTROL},
211 {"io_modes", S_IRUGO | S_IWUSR, SPI_IO_MODES},
212 {"sw_reset", S_IWUSR, SPI_SW_RESET},
213 {"time_out", S_IRUGO | S_IWUSR, SPI_TIME_OUT},
214 {"time_out_current", S_IRUGO, SPI_TIME_OUT_CURRENT},
215 {"mx_output_count", S_IRUGO | S_IWUSR, SPI_MX_OUTPUT_COUNT},
216 {"mx_output_cnt_current", S_IRUGO, SPI_MX_OUTPUT_CNT_CURRENT},
217 {"mx_input_count", S_IRUGO | S_IWUSR, SPI_MX_INPUT_COUNT},
218 {"mx_input_cnt_current", S_IRUGO, SPI_MX_INPUT_CNT_CURRENT},
219 {"mx_read_count", S_IRUGO | S_IWUSR, SPI_MX_READ_COUNT},
220 {"mx_read_cnt_current", S_IRUGO, SPI_MX_READ_CNT_CURRENT},
221 {"operational", S_IRUGO | S_IWUSR, SPI_OPERATIONAL},
222 {"error_flags", S_IRUGO | S_IWUSR, SPI_ERROR_FLAGS},
223 {"error_flags_en", S_IRUGO | S_IWUSR, SPI_ERROR_FLAGS_EN},
224 {"deassert_wait", S_IRUGO | S_IWUSR, SPI_DEASSERT_WAIT},
225 {"output_debug", S_IRUGO, SPI_OUTPUT_DEBUG},
226 {"input_debug", S_IRUGO, SPI_INPUT_DEBUG},
227 {"test_ctrl", S_IRUGO | S_IWUSR, SPI_TEST_CTRL},
228 {"output_fifo", S_IWUSR, SPI_OUTPUT_FIFO},
229 {"input_fifo" , S_IRUSR, SPI_INPUT_FIFO},
230 {"spi_state", S_IRUGO | S_IWUSR, SPI_STATE},
231#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
232 {"fifo_word_cnt", S_IRUGO, SPI_FIFO_WORD_CNT},
233#elif defined(CONFIG_SPI_QUP) || defined(CONFIG_SPI_QUP_MODULE)
234 {"qup_config", S_IRUGO | S_IWUSR, QUP_CONFIG},
235 {"qup_error_flags", S_IRUGO | S_IWUSR, QUP_ERROR_FLAGS},
236 {"qup_error_flags_en", S_IRUGO | S_IWUSR, QUP_ERROR_FLAGS_EN},
237 {"mx_write_cnt", S_IRUGO | S_IWUSR, QUP_MX_WRITE_COUNT},
238 {"mx_write_cnt_current", S_IRUGO, QUP_MX_WRITE_CNT_CURRENT},
239 {"output_fifo_word_cnt", S_IRUGO, SPI_OUTPUT_FIFO_WORD_CNT},
240 {"input_fifo_word_cnt", S_IRUGO, SPI_INPUT_FIFO_WORD_CNT},
241#endif
242};
243#endif
244
245struct msm_spi {
246 u8 *read_buf;
247 const u8 *write_buf;
248 void __iomem *base;
249 void __iomem *gsbi_base;
250 struct device *dev;
251 spinlock_t queue_lock;
252 struct mutex core_lock;
253 struct list_head queue;
254 struct workqueue_struct *workqueue;
255 struct work_struct work_data;
256 struct spi_message *cur_msg;
257 struct spi_transfer *cur_transfer;
258 struct completion transfer_complete;
259 struct clk *clk;
260 struct clk *pclk;
261 unsigned long mem_phys_addr;
262 size_t mem_size;
263 unsigned long gsbi_mem_phys_addr;
264 size_t gsbi_mem_size;
265 int input_fifo_size;
266 int output_fifo_size;
267 u32 rx_bytes_remaining;
268 u32 tx_bytes_remaining;
269 u32 clock_speed;
270 u32 irq_in;
271 int read_xfr_cnt;
272 int write_xfr_cnt;
273 int write_len;
274 int read_len;
275#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
276 u32 irq_out;
277 u32 irq_err;
278#endif
279 int bytes_per_word;
280 bool suspended;
281 bool transfer_pending;
282 wait_queue_head_t continue_suspend;
283 /* DMA data */
284 enum msm_spi_mode mode;
285 bool use_dma;
286 int tx_dma_chan;
287 int tx_dma_crci;
288 int rx_dma_chan;
289 int rx_dma_crci;
290 /* Data Mover Commands */
291 struct spi_dmov_cmd *tx_dmov_cmd;
292 struct spi_dmov_cmd *rx_dmov_cmd;
293 /* Physical address of the tx dmov box command */
294 dma_addr_t tx_dmov_cmd_dma;
295 dma_addr_t rx_dmov_cmd_dma;
296 struct msm_dmov_cmd tx_hdr;
297 struct msm_dmov_cmd rx_hdr;
298 int input_block_size;
299 int output_block_size;
300 int burst_size;
301 atomic_t rx_irq_called;
302 /* Used to pad messages unaligned to block size */
303 u8 *tx_padding;
304 dma_addr_t tx_padding_dma;
305 u8 *rx_padding;
306 dma_addr_t rx_padding_dma;
307 u32 unaligned_len;
308 /* DMA statistics */
309 int stat_dmov_tx_err;
310 int stat_dmov_rx_err;
311 int stat_rx;
312 int stat_dmov_rx;
313 int stat_tx;
314 int stat_dmov_tx;
315#ifdef CONFIG_DEBUG_FS
316 struct dentry *dent_spi;
317 struct dentry *debugfs_spi_regs[ARRAY_SIZE(debugfs_spi_regs)];
318#endif
319 struct msm_spi_platform_data *pdata; /* Platform data */
320 /* Remote Spinlock Data */
321 bool use_rlock;
322 remote_mutex_t r_lock;
323 uint32_t pm_lat;
324 /* When set indicates multiple transfers in a single message */
325 bool multi_xfr;
326 bool done;
327 u32 cur_msg_len;
328 /* Used in FIFO mode to keep track of the transfer being processed */
329 struct spi_transfer *cur_tx_transfer;
330 struct spi_transfer *cur_rx_transfer;
331 /* Temporary buffer used for WR-WR or WR-RD transfers */
332 u8 *temp_buf;
333 /* GPIO pin numbers for SPI clk, cs, miso and mosi */
334 int spi_gpios[ARRAY_SIZE(spi_rsrcs)];
335};
336
337/* Forward declaration */
338static irqreturn_t msm_spi_input_irq(int irq, void *dev_id);
339static irqreturn_t msm_spi_output_irq(int irq, void *dev_id);
340static irqreturn_t msm_spi_error_irq(int irq, void *dev_id);
341static inline int msm_spi_set_state(struct msm_spi *dd,
342 enum msm_spi_state state);
343static void msm_spi_write_word_to_fifo(struct msm_spi *dd);
344static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd);
345
346#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
347/* Interrupt Handling */
348static inline int msm_spi_get_irq_data(struct msm_spi *dd,
349 struct platform_device *pdev)
350{
351 dd->irq_in = platform_get_irq_byname(pdev, "spi_irq_in");
352 dd->irq_out = platform_get_irq_byname(pdev, "spi_irq_out");
353 dd->irq_err = platform_get_irq_byname(pdev, "spi_irq_err");
354 if ((dd->irq_in < 0) || (dd->irq_out < 0) || (dd->irq_err < 0))
355 return -1;
356 return 0;
357}
358
359static inline int msm_spi_get_gsbi_resource(struct msm_spi *dd,
360 struct platform_device *pdev)
361{
362 return 0;
363}
364
365static inline int msm_spi_request_gsbi(struct msm_spi *dd) { return 0; }
366static inline void msm_spi_release_gsbi(struct msm_spi *dd) {}
367static inline void msm_spi_init_gsbi(struct msm_spi *dd) {}
368
369static inline void msm_spi_disable_irqs(struct msm_spi *dd)
370{
371 disable_irq(dd->irq_in);
372 disable_irq(dd->irq_out);
373 disable_irq(dd->irq_err);
374}
375
376static inline void msm_spi_enable_irqs(struct msm_spi *dd)
377{
378 enable_irq(dd->irq_in);
379 enable_irq(dd->irq_out);
380 enable_irq(dd->irq_err);
381}
382
383static inline int msm_spi_request_irq(struct msm_spi *dd,
384 const char *name,
385 struct spi_master *master)
386{
387 int rc;
388 rc = request_irq(dd->irq_in, msm_spi_input_irq, IRQF_TRIGGER_RISING,
389 name, dd);
390 if (rc)
391 goto error_irq1;
392 rc = request_irq(dd->irq_out, msm_spi_output_irq, IRQF_TRIGGER_RISING,
393 name, dd);
394 if (rc)
395 goto error_irq2;
396 rc = request_irq(dd->irq_err, msm_spi_error_irq, IRQF_TRIGGER_RISING,
397 name, master);
398 if (rc)
399 goto error_irq3;
400 return 0;
401
402error_irq3:
403 free_irq(dd->irq_out, dd);
404error_irq2:
405 free_irq(dd->irq_in, dd);
406error_irq1:
407 return rc;
408}
409
410static inline void msm_spi_free_irq(struct msm_spi *dd,
411 struct spi_master *master)
412{
413 free_irq(dd->irq_err, master);
414 free_irq(dd->irq_out, dd);
415 free_irq(dd->irq_in, dd);
416}
417
418static inline void msm_spi_get_clk_err(struct msm_spi *dd, u32 *spi_err) {}
419static inline void msm_spi_ack_clk_err(struct msm_spi *dd) {}
420static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw) {}
421
422static inline int msm_spi_prepare_for_write(struct msm_spi *dd) { return 0; }
423static inline void msm_spi_start_write(struct msm_spi *dd, u32 read_count)
424{
425 msm_spi_write_word_to_fifo(dd);
426}
427static inline void msm_spi_set_write_count(struct msm_spi *dd, int val) {}
428
429static inline void msm_spi_complete(struct msm_spi *dd)
430{
431 complete(&dd->transfer_complete);
432}
433
434static inline void msm_spi_enable_error_flags(struct msm_spi *dd)
435{
436 writel_relaxed(0x0000007B, dd->base + SPI_ERROR_FLAGS_EN);
437}
438
439static inline void msm_spi_clear_error_flags(struct msm_spi *dd)
440{
441 writel_relaxed(0x0000007F, dd->base + SPI_ERROR_FLAGS);
442}
443
444#elif defined(CONFIG_SPI_QUP) || defined(CONFIG_SPI_QUP_MODULE)
445
446/* Interrupt Handling */
447/* In QUP the same interrupt line is used for input, output and error */
448static inline int msm_spi_get_irq_data(struct msm_spi *dd,
449 struct platform_device *pdev)
450{
451 dd->irq_in = platform_get_irq_byname(pdev, "spi_irq_in");
452 if (dd->irq_in < 0)
453 return -1;
454 return 0;
455}
456
457static inline int msm_spi_get_gsbi_resource(struct msm_spi *dd,
458 struct platform_device *pdev)
459{
460 struct resource *resource;
461
462 resource = platform_get_resource_byname(pdev,
463 IORESOURCE_MEM, "gsbi_base");
464 if (!resource)
465 return -ENXIO;
466 dd->gsbi_mem_phys_addr = resource->start;
467 dd->gsbi_mem_size = resource_size(resource);
468
469 return 0;
470}
471
472static inline void msm_spi_release_gsbi(struct msm_spi *dd)
473{
474 iounmap(dd->gsbi_base);
475 release_mem_region(dd->gsbi_mem_phys_addr, dd->gsbi_mem_size);
476}
477
478static inline int msm_spi_request_gsbi(struct msm_spi *dd)
479{
480 if (!request_mem_region(dd->gsbi_mem_phys_addr, dd->gsbi_mem_size,
481 SPI_DRV_NAME)) {
482 return -ENXIO;
483 }
484 dd->gsbi_base = ioremap(dd->gsbi_mem_phys_addr, dd->gsbi_mem_size);
485 if (!dd->gsbi_base) {
486 release_mem_region(dd->gsbi_mem_phys_addr, dd->gsbi_mem_size);
487 return -ENXIO;
488 }
489 return 0;
490}
491
492static inline void msm_spi_init_gsbi(struct msm_spi *dd)
493{
494 /* Set GSBI to SPI mode, and CRCI_MUX_CTRL to SPI CRCI ports */
495 writel_relaxed(GSBI_SPI_CONFIG, dd->gsbi_base + GSBI_CTRL_REG);
496}
497
498/* Figure out which irq occurred and call the relevant functions */
499static irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
500{
501 u32 op, ret = IRQ_NONE;
502 struct msm_spi *dd = dev_id;
503
504 if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
505 readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
506 struct spi_master *master = dev_get_drvdata(dd->dev);
507 ret |= msm_spi_error_irq(irq, master);
508 }
509
510 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
511 if (op & SPI_OP_INPUT_SERVICE_FLAG) {
512 writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
513 dd->base + SPI_OPERATIONAL);
514 /*
515 * Ensure service flag was cleared before further
516 * processing of interrupt.
517 */
518 mb();
519 ret |= msm_spi_input_irq(irq, dev_id);
520 }
521
522 if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
523 writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
524 dd->base + SPI_OPERATIONAL);
525 /*
526 * Ensure service flag was cleared before further
527 * processing of interrupt.
528 */
529 mb();
530 ret |= msm_spi_output_irq(irq, dev_id);
531 }
532
533 if (dd->done) {
534 complete(&dd->transfer_complete);
535 dd->done = 0;
536 }
537 return ret;
538}
539
540static inline int msm_spi_request_irq(struct msm_spi *dd,
541 const char *name,
542 struct spi_master *master)
543{
544 return request_irq(dd->irq_in, msm_spi_qup_irq, IRQF_TRIGGER_HIGH,
545 name, dd);
546}
547
548static inline void msm_spi_free_irq(struct msm_spi *dd,
549 struct spi_master *master)
550{
551 free_irq(dd->irq_in, dd);
552}
553
554static inline void msm_spi_free_output_irq(struct msm_spi *dd) { }
555static inline void msm_spi_free_error_irq(struct msm_spi *dd,
556 struct spi_master *master) { }
557
558static inline void msm_spi_disable_irqs(struct msm_spi *dd)
559{
560 disable_irq(dd->irq_in);
561}
562
563static inline void msm_spi_enable_irqs(struct msm_spi *dd)
564{
565 enable_irq(dd->irq_in);
566}
567
568static inline void msm_spi_get_clk_err(struct msm_spi *dd, u32 *spi_err)
569{
570 *spi_err = readl_relaxed(dd->base + QUP_ERROR_FLAGS);
571}
572
573static inline void msm_spi_ack_clk_err(struct msm_spi *dd)
574{
575 writel_relaxed(QUP_ERR_MASK, dd->base + QUP_ERROR_FLAGS);
576}
577
578static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n);
579
580/* QUP has no_input, no_output, and N bits at QUP_CONFIG */
581static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw)
582{
583 u32 qup_config = readl_relaxed(dd->base + QUP_CONFIG);
584
585 msm_spi_add_configs(dd, &qup_config, bpw-1);
586 writel_relaxed(qup_config | QUP_CONFIG_SPI_MODE,
587 dd->base + QUP_CONFIG);
588}
589
590static inline int msm_spi_prepare_for_write(struct msm_spi *dd)
591{
592 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
593 return -1;
594 if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
595 return -1;
596 return 0;
597}
598
599static inline void msm_spi_start_write(struct msm_spi *dd, u32 read_count)
600{
601 if (read_count <= dd->input_fifo_size)
602 msm_spi_write_rmn_to_fifo(dd);
603 else
604 msm_spi_write_word_to_fifo(dd);
605}
606
607static inline void msm_spi_set_write_count(struct msm_spi *dd, int val)
608{
609 writel_relaxed(val, dd->base + QUP_MX_WRITE_COUNT);
610}
611
612static inline void msm_spi_complete(struct msm_spi *dd)
613{
614 dd->done = 1;
615}
616
617static inline void msm_spi_enable_error_flags(struct msm_spi *dd)
618{
619 writel_relaxed(0x00000078, dd->base + SPI_ERROR_FLAGS_EN);
620}
621
622static inline void msm_spi_clear_error_flags(struct msm_spi *dd)
623{
624 writel_relaxed(0x0000007C, dd->base + SPI_ERROR_FLAGS);
625}
626
627#endif
628
629static inline int msm_spi_request_gpios(struct msm_spi *dd)
630{
631 int i;
632 int result = 0;
633
634 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
635 if (dd->spi_gpios[i] >= 0) {
636 result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
637 if (result) {
638				pr_err("%s: gpio_request for pin %d failed "
639					"with error %d\n", __func__,
640					dd->spi_gpios[i], result);
641 goto error;
642 }
643 }
644 }
645 return 0;
646
647error:
648 for (; --i >= 0;) {
649 if (dd->spi_gpios[i] >= 0)
650 gpio_free(dd->spi_gpios[i]);
651 }
652 return result;
653}
654
655static inline void msm_spi_free_gpios(struct msm_spi *dd)
656{
657 int i;
658
659 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
660 if (dd->spi_gpios[i] >= 0)
661 gpio_free(dd->spi_gpios[i]);
662 }
663}
664
665static void msm_spi_clock_set(struct msm_spi *dd, int speed)
666{
667 int rc;
668
669 rc = clk_set_rate(dd->clk, speed);
670 if (!rc)
671 dd->clock_speed = speed;
672}
673
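/*
 * Decodes the block-size and FIFO-size fields read out of SPI_IO_MODES.
 * For example, block = 1 and mult = 2 decode to words = 4, i.e. a 16-byte
 * block and a FIFO of 4 * 8 = 32 words.
 */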
674static int msm_spi_calculate_size(int *fifo_size,
675 int *block_size,
676 int block,
677 int mult)
678{
679 int words;
680
681 switch (block) {
682 case 0:
683 words = 1; /* 4 bytes */
684 break;
685 case 1:
686 words = 4; /* 16 bytes */
687 break;
688 case 2:
689 words = 8; /* 32 bytes */
690 break;
691 default:
692 return -1;
693 }
694 switch (mult) {
695 case 0:
696 *fifo_size = words * 2;
697 break;
698 case 1:
699 *fifo_size = words * 4;
700 break;
701 case 2:
702 *fifo_size = words * 8;
703 break;
704 case 3:
705 *fifo_size = words * 16;
706 break;
707 default:
708 return -1;
709 }
710 *block_size = words * sizeof(u32); /* in bytes */
711 return 0;
712}
713
714static void get_next_transfer(struct msm_spi *dd)
715{
716 struct spi_transfer *t = dd->cur_transfer;
717
718 if (t->transfer_list.next != &dd->cur_msg->transfers) {
719 dd->cur_transfer = list_entry(t->transfer_list.next,
720 struct spi_transfer,
721 transfer_list);
722 dd->write_buf = dd->cur_transfer->tx_buf;
723 dd->read_buf = dd->cur_transfer->rx_buf;
724 }
725}
726
727static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
728{
729 u32 spi_iom;
730 int block;
731 int mult;
732
733 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
734
735 block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
736 mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
737 if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
738 block, mult)) {
739 goto fifo_size_err;
740 }
741
742 block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
743 mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
744 if (msm_spi_calculate_size(&dd->output_fifo_size,
745 &dd->output_block_size, block, mult)) {
746 goto fifo_size_err;
747 }
748 /* DM mode is not available for this block size */
749 if (dd->input_block_size == 4 || dd->output_block_size == 4)
750 dd->use_dma = 0;
751
752 /* DM mode is currently unsupported for different block sizes */
753 if (dd->input_block_size != dd->output_block_size)
754 dd->use_dma = 0;
755
756 if (dd->use_dma)
757 dd->burst_size = max(dd->input_block_size, DM_BURST_SIZE);
758
759 return;
760
761fifo_size_err:
762 dd->use_dma = 0;
763 printk(KERN_WARNING "%s: invalid FIFO size, SPI_IO_MODES=0x%x\n",
764 __func__, spi_iom);
765 return;
766}
767
768static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
769{
770 u32 data_in;
771 int i;
772 int shift;
773
774 data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
775 if (dd->read_buf) {
776 for (i = 0; (i < dd->bytes_per_word) &&
777 dd->rx_bytes_remaining; i++) {
778 /* The data format depends on bytes_per_word:
779 4 bytes: 0x12345678
780 3 bytes: 0x00123456
781 2 bytes: 0x00001234
782 1 byte : 0x00000012
783 */
784 shift = 8 * (dd->bytes_per_word - i - 1);
785 *dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
786 dd->rx_bytes_remaining--;
787 }
788 } else {
789 if (dd->rx_bytes_remaining >= dd->bytes_per_word)
790 dd->rx_bytes_remaining -= dd->bytes_per_word;
791 else
792 dd->rx_bytes_remaining = 0;
793 }
794 dd->read_xfr_cnt++;
795 if (dd->multi_xfr) {
796 if (!dd->rx_bytes_remaining)
797 dd->read_xfr_cnt = 0;
798 else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
799 dd->read_len) {
800 struct spi_transfer *t = dd->cur_rx_transfer;
801 if (t->transfer_list.next != &dd->cur_msg->transfers) {
802 t = list_entry(t->transfer_list.next,
803 struct spi_transfer,
804 transfer_list);
805 dd->read_buf = t->rx_buf;
806 dd->read_len = t->len;
807 dd->read_xfr_cnt = 0;
808 dd->cur_rx_transfer = t;
809 }
810 }
811 }
812}
813
814static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
815{
816 u32 spi_op = readl_relaxed(dd->base + SPI_STATE);
817
818 return spi_op & SPI_OP_STATE_VALID;
819}
820
821static inline int msm_spi_wait_valid(struct msm_spi *dd)
822{
823 unsigned long delay = 0;
824 unsigned long timeout = 0;
825
826 if (dd->clock_speed == 0)
827 return -EINVAL;
828 /*
829 * Based on the SPI clock speed, sufficient time
830 * should be given for the SPI state transition
831 * to occur
832 */
833 delay = (10 * USEC_PER_SEC) / dd->clock_speed;
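	/* e.g. ~10 us at 1 MHz; for clocks above 10 MHz the value is
	 * raised to SPI_DELAY_THRESHOLD below.
	 */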
834 /*
835 * For small delay values, the default timeout would
836 * be one jiffy
837 */
838 if (delay < SPI_DELAY_THRESHOLD)
839 delay = SPI_DELAY_THRESHOLD;
840 timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT);
841 while (!msm_spi_is_valid_state(dd)) {
842 if (time_after(jiffies, timeout)) {
843 if (dd->cur_msg)
844 dd->cur_msg->status = -EIO;
845			dev_err(dd->dev, "%s: SPI operational state not valid\n",
846				__func__);
847 return -1;
848 }
849 /*
850 * For smaller values of delay, context switch time
851 * would negate the usage of usleep
852 */
853 if (delay > 20)
854 usleep(delay);
855 else if (delay)
856 udelay(delay);
857 }
858 return 0;
859}
860
861static inline int msm_spi_set_state(struct msm_spi *dd,
862 enum msm_spi_state state)
863{
864 enum msm_spi_state cur_state;
865 if (msm_spi_wait_valid(dd))
866 return -1;
867 cur_state = readl_relaxed(dd->base + SPI_STATE);
868 /* Per spec:
869 For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
870 if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
871 (state == SPI_OP_STATE_RESET)) {
872 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
873 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
874 } else {
875 writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
876 dd->base + SPI_STATE);
877 }
878 if (msm_spi_wait_valid(dd))
879 return -1;
880
881 return 0;
882}
883
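/*
 * The N field of SPI_CONFIG (and, on QUP, of QUP_CONFIG) holds the
 * bits-per-word minus one; e.g. 8-bit words are programmed as N = 7,
 * which is why callers pass bpw - 1.
 */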
884static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n)
885{
886 *config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);
887
888 if (n != (*config & SPI_CFG_N))
889 *config = (*config & ~SPI_CFG_N) | n;
890
891 if ((dd->mode == SPI_DMOV_MODE) && (!dd->read_len)) {
892 if (dd->read_buf == NULL)
893 *config |= SPI_NO_INPUT;
894 if (dd->write_buf == NULL)
895 *config |= SPI_NO_OUTPUT;
896 }
897}
898
899static void msm_spi_set_config(struct msm_spi *dd, int bpw)
900{
901 u32 spi_config;
902
903 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
904
905 if (dd->cur_msg->spi->mode & SPI_CPHA)
906 spi_config &= ~SPI_CFG_INPUT_FIRST;
907 else
908 spi_config |= SPI_CFG_INPUT_FIRST;
909 if (dd->cur_msg->spi->mode & SPI_LOOP)
910 spi_config |= SPI_CFG_LOOPBACK;
911 else
912 spi_config &= ~SPI_CFG_LOOPBACK;
913 msm_spi_add_configs(dd, &spi_config, bpw-1);
914 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
915 msm_spi_set_qup_config(dd, bpw);
916}
917
918static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
919{
920 dmov_box *box;
921 int bytes_to_send, num_rows, bytes_sent;
922 u32 num_transfers;
923
924 atomic_set(&dd->rx_irq_called, 0);
925 if (dd->write_len && !dd->read_len) {
926 /* WR-WR transfer */
927 bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
928 dd->write_buf = dd->temp_buf;
929 } else {
930 bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
931 /* For WR-RD transfer, bytes_sent can be negative */
932 if (bytes_sent < 0)
933 bytes_sent = 0;
934 }
935
936 /* We'll send in chunks of SPI_MAX_LEN if larger */
937 bytes_to_send = dd->tx_bytes_remaining / SPI_MAX_LEN ?
938 SPI_MAX_LEN : dd->tx_bytes_remaining;
939 num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
940 dd->unaligned_len = bytes_to_send % dd->burst_size;
941 num_rows = bytes_to_send / dd->burst_size;
942
943 dd->mode = SPI_DMOV_MODE;
944
945 if (num_rows) {
946 /* src in 16 MSB, dst in 16 LSB */
947 box = &dd->tx_dmov_cmd->box;
948 box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
949 box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
950 box->num_rows = (num_rows << 16) | num_rows;
951 box->row_offset = (dd->burst_size << 16) | 0;
952
953 box = &dd->rx_dmov_cmd->box;
954 box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
955 box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
956 box->num_rows = (num_rows << 16) | num_rows;
957 box->row_offset = (0 << 16) | dd->burst_size;
958
959 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
960 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
961 offsetof(struct spi_dmov_cmd, box));
962 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
963 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
964 offsetof(struct spi_dmov_cmd, box));
965 } else {
966 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
967 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
968 offsetof(struct spi_dmov_cmd, single_pad));
969 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
970 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
971 offsetof(struct spi_dmov_cmd, single_pad));
972 }
973
974 if (!dd->unaligned_len) {
975 dd->tx_dmov_cmd->box.cmd |= CMD_LC;
976 dd->rx_dmov_cmd->box.cmd |= CMD_LC;
977 } else {
978 dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
979 dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
980 u32 offset = dd->cur_transfer->len - dd->unaligned_len;
981
982 if ((dd->multi_xfr) && (dd->read_len <= 0))
983 offset = dd->cur_msg_len - dd->unaligned_len;
984
985 dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
986 dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;
987
988 memset(dd->tx_padding, 0, dd->burst_size);
989 memset(dd->rx_padding, 0, dd->burst_size);
990 if (dd->write_buf)
991 memcpy(dd->tx_padding, dd->write_buf + offset,
992 dd->unaligned_len);
993
994 tx_cmd->src = dd->tx_padding_dma;
995 rx_cmd->dst = dd->rx_padding_dma;
996 tx_cmd->len = rx_cmd->len = dd->burst_size;
997 }
998	/* The counts written below also take care of the dummy padding buffer:
999	   since they are set to the actual length, the dummy bytes will not
1000	   actually be sent */
1001 if (dd->multi_xfr) {
1002 u32 write_transfers = 0;
1003 u32 read_transfers = 0;
1004
1005 if (dd->write_len > 0) {
1006 write_transfers = DIV_ROUND_UP(dd->write_len,
1007 dd->bytes_per_word);
1008 writel_relaxed(write_transfers,
1009 dd->base + SPI_MX_OUTPUT_COUNT);
1010 }
1011 if (dd->read_len > 0) {
1012			/*
1013			 * The read following a write transfer must take
1014			 * into account that the bytes pertaining to
1015			 * the write transfer need to be discarded
1016			 * before the actual read begins.
1017			 */
1018 read_transfers = DIV_ROUND_UP(dd->read_len +
1019 dd->write_len,
1020 dd->bytes_per_word);
1021 writel_relaxed(read_transfers,
1022 dd->base + SPI_MX_INPUT_COUNT);
1023 }
1024 } else {
1025 if (dd->write_buf)
1026 writel_relaxed(num_transfers,
1027 dd->base + SPI_MX_OUTPUT_COUNT);
1028 if (dd->read_buf)
1029 writel_relaxed(num_transfers,
1030 dd->base + SPI_MX_INPUT_COUNT);
1031 }
1032}
1033
1034static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
1035{
1036 dma_coherent_pre_ops();
1037 if (dd->write_buf)
1038 msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
1039 if (dd->read_buf)
1040 msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
1041}
1042
1043/* The SPI core can send at most 4K transfers at a time because there is a HW
1044   problem with infinite mode.
1045   Therefore, we send the data in chunks of at most SPI_MAX_LEN bytes
1046   (depending on how much is left).
1047   Upon completion we send the next chunk, or complete the transfer if
1048   everything is finished.
1049*/
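/* For example, with 8-bit words SPI_MAX_LEN is 4032 bytes, so a 10000-byte
   transfer goes out as chunks of 4032, 4032 and finally 1936 bytes. */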
1050static int msm_spi_dm_send_next(struct msm_spi *dd)
1051{
1052	/* In FIFO mode all the bytes should have been sent by now;
1053	 * however, to be safe, we check anyway.
1054	 */
1055 if (dd->mode != SPI_DMOV_MODE)
1056 return 0;
1057
1058	/* We need to send more chunks if we sent the max last time */
1059 if (dd->tx_bytes_remaining > SPI_MAX_LEN) {
1060 dd->tx_bytes_remaining -= SPI_MAX_LEN;
1061 if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
1062 return 0;
1063 dd->read_len = dd->write_len = 0;
1064 msm_spi_setup_dm_transfer(dd);
1065 msm_spi_enqueue_dm_commands(dd);
1066 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
1067 return 0;
1068 return 1;
1069 } else if (dd->read_len && dd->write_len) {
1070 dd->tx_bytes_remaining -= dd->cur_transfer->len;
1071 if (list_is_last(&dd->cur_transfer->transfer_list,
1072 &dd->cur_msg->transfers))
1073 return 0;
1074 get_next_transfer(dd);
1075 if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
1076 return 0;
1077 dd->tx_bytes_remaining = dd->read_len + dd->write_len;
1078 dd->read_buf = dd->temp_buf;
1079 dd->read_len = dd->write_len = -1;
1080 msm_spi_setup_dm_transfer(dd);
1081 msm_spi_enqueue_dm_commands(dd);
1082 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
1083 return 0;
1084 return 1;
1085 }
1086 return 0;
1087}
1088
1089static inline void msm_spi_ack_transfer(struct msm_spi *dd)
1090{
1091 writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
1092 SPI_OP_MAX_OUTPUT_DONE_FLAG,
1093 dd->base + SPI_OPERATIONAL);
1094 /* Ensure done flag was cleared before proceeding further */
1095 mb();
1096}
1097
1098static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
1099{
1100 struct msm_spi *dd = dev_id;
1101
1102 dd->stat_rx++;
1103
1104 if (dd->mode == SPI_MODE_NONE)
1105 return IRQ_HANDLED;
1106
1107 if (dd->mode == SPI_DMOV_MODE) {
1108 u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
1109 if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
1110 (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
1111 msm_spi_ack_transfer(dd);
1112 if (dd->unaligned_len == 0) {
1113 if (atomic_inc_return(&dd->rx_irq_called) == 1)
1114 return IRQ_HANDLED;
1115 }
1116 msm_spi_complete(dd);
1117 return IRQ_HANDLED;
1118 }
1119 return IRQ_NONE;
1120 }
1121
1122 if (dd->mode == SPI_FIFO_MODE) {
1123 while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
1124 SPI_OP_IP_FIFO_NOT_EMPTY) &&
1125 (dd->rx_bytes_remaining > 0)) {
1126 msm_spi_read_word_from_fifo(dd);
1127 }
1128 if (dd->rx_bytes_remaining == 0)
1129 msm_spi_complete(dd);
1130 }
1131
1132 return IRQ_HANDLED;
1133}
1134
1135static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
1136{
1137 u32 word;
1138 u8 byte;
1139 int i;
1140
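	/* Bytes are packed MSB-first into the output word; e.g. with two
	 * bytes per word, 0x12 followed by 0x34 is written as 0x12340000.
	 */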
1141 word = 0;
1142 if (dd->write_buf) {
1143 for (i = 0; (i < dd->bytes_per_word) &&
1144 dd->tx_bytes_remaining; i++) {
1145 dd->tx_bytes_remaining--;
1146 byte = *dd->write_buf++;
1147 word |= (byte << (BITS_PER_BYTE * (3 - i)));
1148 }
1149 } else
1150 if (dd->tx_bytes_remaining > dd->bytes_per_word)
1151 dd->tx_bytes_remaining -= dd->bytes_per_word;
1152 else
1153 dd->tx_bytes_remaining = 0;
1154 dd->write_xfr_cnt++;
1155 if (dd->multi_xfr) {
1156 if (!dd->tx_bytes_remaining)
1157 dd->write_xfr_cnt = 0;
1158 else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
1159 dd->write_len) {
1160 struct spi_transfer *t = dd->cur_tx_transfer;
1161 if (t->transfer_list.next != &dd->cur_msg->transfers) {
1162 t = list_entry(t->transfer_list.next,
1163 struct spi_transfer,
1164 transfer_list);
1165 dd->write_buf = t->tx_buf;
1166 dd->write_len = t->len;
1167 dd->write_xfr_cnt = 0;
1168 dd->cur_tx_transfer = t;
1169 }
1170 }
1171 }
1172 writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
1173}
1174
1175static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
1176{
1177 int count = 0;
1178
1179 while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
1180 !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
1181 SPI_OP_OUTPUT_FIFO_FULL)) {
1182 msm_spi_write_word_to_fifo(dd);
1183 count++;
1184 }
1185}
1186
1187static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
1188{
1189 struct msm_spi *dd = dev_id;
1190
1191 dd->stat_tx++;
1192
1193 if (dd->mode == SPI_MODE_NONE)
1194 return IRQ_HANDLED;
1195
1196 if (dd->mode == SPI_DMOV_MODE) {
1197		/* A TX-only transaction is handled here.
1198		   This is the only place where we complete on TX rather than on RX */
1199 if (dd->read_buf == NULL &&
1200 readl_relaxed(dd->base + SPI_OPERATIONAL) &
1201 SPI_OP_MAX_OUTPUT_DONE_FLAG) {
1202 msm_spi_ack_transfer(dd);
1203 msm_spi_complete(dd);
1204 return IRQ_HANDLED;
1205 }
1206 return IRQ_NONE;
1207 }
1208
1209 /* Output FIFO is empty. Transmit any outstanding write data. */
1210 if (dd->mode == SPI_FIFO_MODE)
1211 msm_spi_write_rmn_to_fifo(dd);
1212
1213 return IRQ_HANDLED;
1214}
1215
1216static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
1217{
1218 struct spi_master *master = dev_id;
1219 struct msm_spi *dd = spi_master_get_devdata(master);
1220 u32 spi_err;
1221
1222 spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
1223 if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
1224 dev_warn(master->dev.parent, "SPI output overrun error\n");
1225 if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
1226 dev_warn(master->dev.parent, "SPI input underrun error\n");
1227 if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
1228 dev_warn(master->dev.parent, "SPI output underrun error\n");
1229 msm_spi_get_clk_err(dd, &spi_err);
1230 if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
1231 dev_warn(master->dev.parent, "SPI clock overrun error\n");
1232 if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
1233 dev_warn(master->dev.parent, "SPI clock underrun error\n");
1234 msm_spi_clear_error_flags(dd);
1235 msm_spi_ack_clk_err(dd);
1236 /* Ensure clearing of QUP_ERROR_FLAGS was completed */
1237 mb();
1238 return IRQ_HANDLED;
1239}
1240
1241static int msm_spi_map_dma_buffers(struct msm_spi *dd)
1242{
1243 struct device *dev;
1244 struct spi_transfer *first_xfr;
1245 struct spi_transfer *nxt_xfr;
1246 void *tx_buf, *rx_buf;
1247 unsigned tx_len, rx_len;
1248 int ret = -EINVAL;
1249
1250 dev = &dd->cur_msg->spi->dev;
1251 first_xfr = dd->cur_transfer;
1252 tx_buf = (void *)first_xfr->tx_buf;
1253 rx_buf = first_xfr->rx_buf;
1254 tx_len = rx_len = first_xfr->len;
1255
1256 /*
1257 * For WR-WR and WR-RD transfers, we allocate our own temporary
1258 * buffer and copy the data to/from the client buffers.
1259 */
1260 if (dd->multi_xfr) {
1261 dd->temp_buf = kzalloc(dd->cur_msg_len,
1262 GFP_KERNEL | __GFP_DMA);
1263 if (!dd->temp_buf)
1264 return -ENOMEM;
1265 nxt_xfr = list_entry(first_xfr->transfer_list.next,
1266 struct spi_transfer, transfer_list);
1267
1268 if (dd->write_len && !dd->read_len) {
1269 if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
1270 goto error;
1271
1272 memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
1273 memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
1274 nxt_xfr->len);
1275 tx_buf = dd->temp_buf;
1276 tx_len = dd->cur_msg_len;
1277 } else {
1278 if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
1279 goto error;
1280
1281 rx_buf = dd->temp_buf;
1282 rx_len = dd->cur_msg_len;
1283 }
1284 }
1285 if (tx_buf != NULL) {
1286 first_xfr->tx_dma = dma_map_single(dev, tx_buf,
1287 tx_len, DMA_TO_DEVICE);
1288 if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
1289 dev_err(dev, "dma %cX %d bytes error\n",
1290 'T', tx_len);
1291 ret = -ENOMEM;
1292 goto error;
1293 }
1294 }
1295 if (rx_buf != NULL) {
1296 dma_addr_t dma_handle;
1297 dma_handle = dma_map_single(dev, rx_buf,
1298 rx_len, DMA_FROM_DEVICE);
1299 if (dma_mapping_error(NULL, dma_handle)) {
1300 dev_err(dev, "dma %cX %d bytes error\n",
1301 'R', rx_len);
1302 if (tx_buf != NULL)
1303 dma_unmap_single(NULL, first_xfr->tx_dma,
1304 tx_len, DMA_TO_DEVICE);
1305 ret = -ENOMEM;
1306 goto error;
1307 }
1308 if (dd->multi_xfr)
1309 nxt_xfr->rx_dma = dma_handle;
1310 else
1311 first_xfr->rx_dma = dma_handle;
1312 }
1313 return 0;
1314
1315error:
1316 kfree(dd->temp_buf);
1317 dd->temp_buf = NULL;
1318 return ret;
1319}
1320
1321static void msm_spi_unmap_dma_buffers(struct msm_spi *dd)
1322{
1323 struct device *dev;
1324 u32 offset;
1325
1326 dev = &dd->cur_msg->spi->dev;
1327 if (dd->cur_msg->is_dma_mapped)
1328 goto unmap_end;
1329
1330 if (dd->multi_xfr) {
1331 if (dd->write_len && !dd->read_len) {
1332 dma_unmap_single(dev,
1333 dd->cur_transfer->tx_dma,
1334 dd->cur_msg_len,
1335 DMA_TO_DEVICE);
1336 } else {
1337 struct spi_transfer *prev_xfr;
1338 prev_xfr = list_entry(
1339 dd->cur_transfer->transfer_list.prev,
1340 struct spi_transfer,
1341 transfer_list);
1342 if (dd->cur_transfer->rx_buf) {
1343 dma_unmap_single(dev,
1344 dd->cur_transfer->rx_dma,
1345 dd->cur_msg_len,
1346 DMA_FROM_DEVICE);
1347 }
1348 if (prev_xfr->tx_buf) {
1349 dma_unmap_single(dev,
1350 prev_xfr->tx_dma,
1351 prev_xfr->len,
1352 DMA_TO_DEVICE);
1353 }
1354 if (dd->unaligned_len && dd->read_buf) {
1355 offset = dd->cur_msg_len - dd->unaligned_len;
1356 dma_coherent_post_ops();
1357 memcpy(dd->read_buf + offset, dd->rx_padding,
1358 dd->unaligned_len);
1359 memcpy(dd->cur_transfer->rx_buf,
1360 dd->read_buf + prev_xfr->len,
1361 dd->cur_transfer->len);
1362 }
1363 }
1364 kfree(dd->temp_buf);
1365 dd->temp_buf = NULL;
1366 return;
1367 } else {
1368 if (dd->cur_transfer->rx_buf)
1369 dma_unmap_single(dev, dd->cur_transfer->rx_dma,
1370 dd->cur_transfer->len,
1371 DMA_FROM_DEVICE);
1372 if (dd->cur_transfer->tx_buf)
1373 dma_unmap_single(dev, dd->cur_transfer->tx_dma,
1374 dd->cur_transfer->len,
1375 DMA_TO_DEVICE);
1376 }
1377
1378unmap_end:
1379 /* If we padded the transfer, we copy it from the padding buf */
1380 if (dd->unaligned_len && dd->read_buf) {
1381 offset = dd->cur_transfer->len - dd->unaligned_len;
1382 dma_coherent_post_ops();
1383 memcpy(dd->read_buf + offset, dd->rx_padding,
1384 dd->unaligned_len);
1385 }
1386}
1387
1388/**
1389 * msm_use_dm - decides whether to use the data mover for this
1390 * transfer
1391 * @dd: device
1392 * @tr: transfer
1393 *
1394 * DM is used only if all of the following hold:
1395 * 1. The transfer is longer than 3 * block size.
1396 * 2. The buffers are aligned to the cache line size.
1397 * 3. For WR-RD or WR-WR transfers, conditions (1) and (2) above are met.
1398 */
1399static inline int msm_use_dm(struct msm_spi *dd, struct spi_transfer *tr,
1400 u8 bpw)
1401{
1402 u32 cache_line = dma_get_cache_alignment();
1403
1404 if (!dd->use_dma)
1405 return 0;
1406
1407 if (dd->cur_msg_len < 3*dd->input_block_size)
1408 return 0;
1409
1410 if (dd->multi_xfr && !dd->read_len && !dd->write_len)
1411 return 0;
1412
1413 if (tr->tx_buf) {
1414 if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
1415 return 0;
1416 }
1417 if (tr->rx_buf) {
1418 if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
1419 return 0;
1420 }
1421
1422 if (tr->cs_change &&
1423	    ((bpw != 8) && (bpw != 16) && (bpw != 32)))
1424 return 0;
1425 return 1;
1426}
1427
1428static void msm_spi_process_transfer(struct msm_spi *dd)
1429{
1430 u8 bpw;
1431 u32 spi_ioc;
1432 u32 spi_iom;
1433 u32 spi_ioc_orig;
1434 u32 max_speed;
1435 u32 chip_select;
1436 u32 read_count;
1437 u32 timeout;
1438 u32 int_loopback = 0;
1439
1440 dd->tx_bytes_remaining = dd->cur_msg_len;
1441 dd->rx_bytes_remaining = dd->cur_msg_len;
1442 dd->read_buf = dd->cur_transfer->rx_buf;
1443 dd->write_buf = dd->cur_transfer->tx_buf;
1444 init_completion(&dd->transfer_complete);
1445 if (dd->cur_transfer->bits_per_word)
1446 bpw = dd->cur_transfer->bits_per_word;
1447 else
1448 if (dd->cur_msg->spi->bits_per_word)
1449 bpw = dd->cur_msg->spi->bits_per_word;
1450 else
1451 bpw = 8;
1452 dd->bytes_per_word = (bpw + 7) / 8;
1453
1454 if (dd->cur_transfer->speed_hz)
1455 max_speed = dd->cur_transfer->speed_hz;
1456 else
1457 max_speed = dd->cur_msg->spi->max_speed_hz;
1458 if (!dd->clock_speed || max_speed != dd->clock_speed)
1459 msm_spi_clock_set(dd, max_speed);
1460
1461 read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
1462 if (dd->cur_msg->spi->mode & SPI_LOOP)
1463 int_loopback = 1;
1464 if (int_loopback && dd->multi_xfr &&
1465 (read_count > dd->input_fifo_size)) {
1466 if (dd->read_len && dd->write_len)
1467 printk(KERN_WARNING
1468			"%s: Internal Loopback does not support > fifo size "
1469			"for write-then-read transactions\n",
1470 __func__);
1471 else if (dd->write_len && !dd->read_len)
1472 printk(KERN_WARNING
1473			"%s: Internal Loopback does not support > fifo size "
1474			"for write-then-write transactions\n",
1475 __func__);
1476 return;
1477 }
1478 if (!msm_use_dm(dd, dd->cur_transfer, bpw)) {
1479 dd->mode = SPI_FIFO_MODE;
1480 if (dd->multi_xfr) {
1481 dd->read_len = dd->cur_transfer->len;
1482 dd->write_len = dd->cur_transfer->len;
1483 }
1484 /* read_count cannot exceed fifo_size, and only one READ COUNT
1485 interrupt is generated per transaction, so for transactions
1486 larger than fifo size READ COUNT must be disabled.
1487 For those transactions we usually move to Data Mover mode.
1488 */
1489 if (read_count <= dd->input_fifo_size) {
1490 writel_relaxed(read_count,
1491 dd->base + SPI_MX_READ_COUNT);
1492 msm_spi_set_write_count(dd, read_count);
1493 } else {
1494 writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
1495 msm_spi_set_write_count(dd, 0);
1496 }
1497 } else {
1498 dd->mode = SPI_DMOV_MODE;
1499 if (dd->write_len && dd->read_len) {
1500 dd->tx_bytes_remaining = dd->write_len;
1501 dd->rx_bytes_remaining = dd->read_len;
1502 }
1503 }
1504
1505	/* Write mode - fifo or data mover */
1506 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
1507 spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
1508 spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
1509 spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
1510 /* Turn on packing for data mover */
1511 if (dd->mode == SPI_DMOV_MODE)
1512 spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
1513 else
1514 spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
1515 writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
1516
1517 msm_spi_set_config(dd, bpw);
1518
1519 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1520 spi_ioc_orig = spi_ioc;
1521 if (dd->cur_msg->spi->mode & SPI_CPOL)
1522 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1523 else
1524 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1525 chip_select = dd->cur_msg->spi->chip_select << 2;
1526 if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
1527 spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
1528 if (!dd->cur_transfer->cs_change)
1529 spi_ioc |= SPI_IO_C_MX_CS_MODE;
1530 if (spi_ioc != spi_ioc_orig)
1531 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1532
1533 if (dd->mode == SPI_DMOV_MODE) {
1534 msm_spi_setup_dm_transfer(dd);
1535 msm_spi_enqueue_dm_commands(dd);
1536 }
1537 /* The output fifo interrupt handler will handle all writes after
1538 the first. Restricting this to one write avoids contention
1539 issues and race conditions between this thread and the int handler
1540 */
1541 else if (dd->mode == SPI_FIFO_MODE) {
1542 if (msm_spi_prepare_for_write(dd))
1543 goto transfer_end;
1544 msm_spi_start_write(dd, read_count);
1545 }
1546
1547 /* Only enter the RUN state after the first word is written into
1548 the output FIFO. Otherwise, the output FIFO EMPTY interrupt
1549 might fire before the first word is written resulting in a
1550 possible race condition.
1551 */
1552 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
1553 goto transfer_end;
1554
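	/*
	 * Allow roughly 100x the nominal transfer time; e.g. 1000 bytes at
	 * 8 MHz is 8000 bits / 8000 bits-per-msec, i.e. about 1 msec, so
	 * the timeout works out to about 100 msec.
	 */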
1555 timeout = 100 * msecs_to_jiffies(
1556 DIV_ROUND_UP(dd->cur_msg_len * 8,
1557 DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
1558
1559 /* Assume success, this might change later upon transaction result */
1560 dd->cur_msg->status = 0;
1561 do {
1562 if (!wait_for_completion_timeout(&dd->transfer_complete,
1563 timeout)) {
1564			dev_err(dd->dev, "%s: SPI transaction timeout\n",
1565				__func__);
1566 dd->cur_msg->status = -EIO;
1567 if (dd->mode == SPI_DMOV_MODE) {
1568 msm_dmov_flush(dd->tx_dma_chan);
1569 msm_dmov_flush(dd->rx_dma_chan);
1570 }
1571 break;
1572 }
1573 } while (msm_spi_dm_send_next(dd));
1574
1575transfer_end:
1576 if (dd->mode == SPI_DMOV_MODE)
1577 msm_spi_unmap_dma_buffers(dd);
1578 dd->mode = SPI_MODE_NONE;
1579
1580 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
1581 writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
1582 dd->base + SPI_IO_CONTROL);
1583}
1584
1585static void get_transfer_length(struct msm_spi *dd)
1586{
1587 struct spi_transfer *tr;
1588 int num_xfrs = 0;
1589 int readlen = 0;
1590 int writelen = 0;
1591
1592 dd->cur_msg_len = 0;
1593 dd->multi_xfr = 0;
1594 dd->read_len = dd->write_len = 0;
1595
1596 list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
1597 if (tr->tx_buf)
1598 writelen += tr->len;
1599 if (tr->rx_buf)
1600 readlen += tr->len;
1601 dd->cur_msg_len += tr->len;
1602 num_xfrs++;
1603 }
1604
1605 if (num_xfrs == 2) {
1606 struct spi_transfer *first_xfr = dd->cur_transfer;
1607
1608 dd->multi_xfr = 1;
1609 tr = list_entry(first_xfr->transfer_list.next,
1610 struct spi_transfer,
1611 transfer_list);
1612 /*
1613 * We update dd->read_len and dd->write_len only
1614 * for WR-WR and WR-RD transfers.
1615 */
1616 if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
1617 if (((tr->tx_buf) && (!tr->rx_buf)) ||
1618 ((!tr->tx_buf) && (tr->rx_buf))) {
1619 dd->read_len = readlen;
1620 dd->write_len = writelen;
1621 }
1622 }
1623 } else if (num_xfrs > 1)
1624 dd->multi_xfr = 1;
1625}
1626
1627static inline int combine_transfers(struct msm_spi *dd)
1628{
1629 struct spi_transfer *t = dd->cur_transfer;
1630 struct spi_transfer *nxt;
1631 int xfrs_grped = 1;
1632
1633 dd->cur_msg_len = dd->cur_transfer->len;
1634 while (t->transfer_list.next != &dd->cur_msg->transfers) {
1635 nxt = list_entry(t->transfer_list.next,
1636 struct spi_transfer,
1637 transfer_list);
1638 if (t->cs_change != nxt->cs_change)
1639 return xfrs_grped;
1640 dd->cur_msg_len += nxt->len;
1641 xfrs_grped++;
1642 t = nxt;
1643 }
1644 return xfrs_grped;
1645}
1646
1647static void msm_spi_process_message(struct msm_spi *dd)
1648{
1649 int xfrs_grped = 0;
1650 dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
1651
1652 dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
1653 struct spi_transfer,
1654 transfer_list);
1655 get_transfer_length(dd);
1656 if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
1657 /* Handling of multi-transfers. FIFO mode is used by default */
1658 list_for_each_entry(dd->cur_transfer,
1659 &dd->cur_msg->transfers,
1660 transfer_list) {
1661 if (!dd->cur_transfer->len)
1662 return;
1663 if (xfrs_grped) {
1664 xfrs_grped--;
1665 continue;
1666 } else {
1667 dd->read_len = dd->write_len = 0;
1668 xfrs_grped = combine_transfers(dd);
1669 }
1670 dd->cur_tx_transfer = dd->cur_transfer;
1671 dd->cur_rx_transfer = dd->cur_transfer;
1672 msm_spi_process_transfer(dd);
1673 xfrs_grped--;
1674 }
1675 } else {
1676 /* Handling of a single transfer or WR-WR or WR-RD transfers */
1677 if ((!dd->cur_msg->is_dma_mapped) &&
1678 (msm_use_dm(dd, dd->cur_transfer,
1679 dd->cur_transfer->bits_per_word))) {
1680 /* Mapping of DMA buffers */
1681 int ret = msm_spi_map_dma_buffers(dd);
1682 if (ret < 0) {
1683 dd->cur_msg->status = ret;
1684 return;
1685 }
1686 }
1687 dd->cur_tx_transfer = dd->cur_rx_transfer = dd->cur_transfer;
1688 msm_spi_process_transfer(dd);
1689 }
1690}
1691
1692/* workqueue - pull messages from queue & process */
1693static void msm_spi_workq(struct work_struct *work)
1694{
1695 struct msm_spi *dd =
1696 container_of(work, struct msm_spi, work_data);
1697 unsigned long flags;
1698 u32 status_error = 0;
1699
1700 mutex_lock(&dd->core_lock);
1701
1702 /* Don't allow power collapse until we release mutex */
1703 if (pm_qos_request_active(&qos_req_list))
1704 pm_qos_update_request(&qos_req_list,
1705 dd->pm_lat);
1706 if (dd->use_rlock)
1707 remote_mutex_lock(&dd->r_lock);
1708
1709 clk_enable(dd->clk);
1710 clk_enable(dd->pclk);
1711 msm_spi_enable_irqs(dd);
1712
1713 if (!msm_spi_is_valid_state(dd)) {
1714 dev_err(dd->dev, "%s: SPI operational state not valid\n",
1715 __func__);
1716 status_error = 1;
1717 }
1718
1719 spin_lock_irqsave(&dd->queue_lock, flags);
1720 while (!list_empty(&dd->queue)) {
1721 dd->cur_msg = list_entry(dd->queue.next,
1722 struct spi_message, queue);
1723 list_del_init(&dd->cur_msg->queue);
1724 spin_unlock_irqrestore(&dd->queue_lock, flags);
1725 if (status_error)
1726 dd->cur_msg->status = -EIO;
1727 else
1728 msm_spi_process_message(dd);
1729 if (dd->cur_msg->complete)
1730 dd->cur_msg->complete(dd->cur_msg->context);
1731 spin_lock_irqsave(&dd->queue_lock, flags);
1732 }
1733 dd->transfer_pending = 0;
1734 spin_unlock_irqrestore(&dd->queue_lock, flags);
1735
1736 msm_spi_disable_irqs(dd);
1737 clk_disable(dd->clk);
1738 clk_disable(dd->pclk);
1739
1740 if (dd->use_rlock)
1741 remote_mutex_unlock(&dd->r_lock);
1742
1743 if (pm_qos_request_active(&qos_req_list))
1744 pm_qos_update_request(&qos_req_list,
1745 PM_QOS_DEFAULT_VALUE);
1746
1747 mutex_unlock(&dd->core_lock);
1748 /* If needed, this can be done after the current message is complete,
1749 and work can be continued upon resume. No motivation for now. */
1750 if (dd->suspended)
1751 wake_up_interruptible(&dd->continue_suspend);
1752}
1753
1754static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
1755{
1756 struct msm_spi *dd;
1757 unsigned long flags;
1758 struct spi_transfer *tr;
1759
1760 dd = spi_master_get_devdata(spi->master);
1761 if (dd->suspended)
1762 return -EBUSY;
1763
1764 if (list_empty(&msg->transfers) || !msg->complete)
1765 return -EINVAL;
1766
1767 list_for_each_entry(tr, &msg->transfers, transfer_list) {
1768 /* Check message parameters */
1769 if (tr->speed_hz > dd->pdata->max_clock_speed ||
1770 (tr->bits_per_word &&
1771 (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
1772 (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
1773			dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw, "
1774				"tx=%p, rx=%p\n",
1775 tr->speed_hz, tr->bits_per_word,
1776 tr->tx_buf, tr->rx_buf);
1777 return -EINVAL;
1778 }
1779 }
1780
1781 spin_lock_irqsave(&dd->queue_lock, flags);
1782 if (dd->suspended) {
1783 spin_unlock_irqrestore(&dd->queue_lock, flags);
1784 return -EBUSY;
1785 }
1786 dd->transfer_pending = 1;
1787 list_add_tail(&msg->queue, &dd->queue);
1788 spin_unlock_irqrestore(&dd->queue_lock, flags);
1789 queue_work(dd->workqueue, &dd->work_data);
1790 return 0;
1791}
1792
1793static int msm_spi_setup(struct spi_device *spi)
1794{
1795 struct msm_spi *dd;
1796 int rc = 0;
1797 u32 spi_ioc;
1798 u32 spi_config;
1799 u32 mask;
1800
1801 if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
1802 dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
1803 __func__, spi->bits_per_word);
1804 rc = -EINVAL;
1805 }
1806 if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
1807 dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
1808 __func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
1809 rc = -EINVAL;
1810 }
1811
1812 if (rc)
1813 goto err_setup_exit;
1814
1815 dd = spi_master_get_devdata(spi->master);
1816
1817 mutex_lock(&dd->core_lock);
1818 if (dd->suspended) {
1819 mutex_unlock(&dd->core_lock);
1820 return -EBUSY;
1821 }
1822
1823 if (dd->use_rlock)
1824 remote_mutex_lock(&dd->r_lock);
1825
1826 clk_enable(dd->clk);
1827 clk_enable(dd->pclk);
1828
1829 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1830 mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
1831 if (spi->mode & SPI_CS_HIGH)
1832 spi_ioc |= mask;
1833 else
1834 spi_ioc &= ~mask;
1835 if (spi->mode & SPI_CPOL)
1836 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1837 else
1838 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1839
1840 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1841
1842 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
1843 if (spi->mode & SPI_LOOP)
1844 spi_config |= SPI_CFG_LOOPBACK;
1845 else
1846 spi_config &= ~SPI_CFG_LOOPBACK;
1847 if (spi->mode & SPI_CPHA)
1848 spi_config &= ~SPI_CFG_INPUT_FIRST;
1849 else
1850 spi_config |= SPI_CFG_INPUT_FIRST;
1851 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
1852
1853 /* Ensure previous write completed before disabling the clocks */
1854 mb();
1855 clk_disable(dd->clk);
1856 clk_disable(dd->pclk);
1857
1858 if (dd->use_rlock)
1859 remote_mutex_unlock(&dd->r_lock);
1860 mutex_unlock(&dd->core_lock);
1861
1862err_setup_exit:
1863 return rc;
1864}
1865
1866#ifdef CONFIG_DEBUG_FS
1867static int debugfs_iomem_x32_set(void *data, u64 val)
1868{
1869 writel_relaxed(val, data);
1870 /* Ensure the previous write completed. */
1871 mb();
1872 return 0;
1873}
1874
1875static int debugfs_iomem_x32_get(void *data, u64 *val)
1876{
1877 *val = readl_relaxed(data);
1878 /* Ensure the previous read completed. */
1879 mb();
1880 return 0;
1881}
1882
1883DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
1884 debugfs_iomem_x32_set, "0x%08llx\n");
1885
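/* Expose every register in debugfs_spi_regs[] as a read/write hex file. */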
1886static void spi_debugfs_init(struct msm_spi *dd)
1887{
1888 dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
1889 if (dd->dent_spi) {
1890 int i;
1891 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
1892 dd->debugfs_spi_regs[i] =
1893 debugfs_create_file(
1894 debugfs_spi_regs[i].name,
1895 debugfs_spi_regs[i].mode,
1896 dd->dent_spi,
1897 dd->base + debugfs_spi_regs[i].offset,
1898 &fops_iomem_x32);
1899 }
1900 }
1901}
1902
1903static void spi_debugfs_exit(struct msm_spi *dd)
1904{
1905 if (dd->dent_spi) {
1906 int i;
1907 debugfs_remove_recursive(dd->dent_spi);
1908 dd->dent_spi = NULL;
1909 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
1910 dd->debugfs_spi_regs[i] = NULL;
1911 }
1912}
1913#else
1914static void spi_debugfs_init(struct msm_spi *dd) {}
1915static void spi_debugfs_exit(struct msm_spi *dd) {}
1916#endif
1917
1918/* ===Device attributes begin=== */
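/*
 * The "stats" attribute reports FIFO sizes, DMA configuration and interrupt
 * counters; writing any value resets the counters. Usage (paths illustrative,
 * the parent directory depends on the platform device name):
 *
 *	cat /sys/devices/platform/spi_qsd.0/stats
 *	echo 1 > /sys/devices/platform/spi_qsd.0/stats
 */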
1919static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
1920 char *buf)
1921{
1922 struct spi_master *master = dev_get_drvdata(dev);
1923 struct msm_spi *dd = spi_master_get_devdata(master);
1924
1925 return snprintf(buf, PAGE_SIZE,
1926 "Device %s\n"
1927 "rx fifo_size = %d spi words\n"
1928 "tx fifo_size = %d spi words\n"
1929 "use_dma ? %s\n"
1930 "rx block size = %d bytes\n"
1931 "tx block size = %d bytes\n"
1932 "burst size = %d bytes\n"
1933 "DMA configuration:\n"
1934			"tx_ch=%d, rx_ch=%d, tx_crci=%d, rx_crci=%d\n"
1935 "--statistics--\n"
1936 "Rx isrs = %d\n"
1937 "Tx isrs = %d\n"
1938 "DMA error = %d\n"
1939 "--debug--\n"
1940			"None yet\n",
1941 dev_name(dev),
1942 dd->input_fifo_size,
1943 dd->output_fifo_size,
1944 dd->use_dma ? "yes" : "no",
1945 dd->input_block_size,
1946 dd->output_block_size,
1947 dd->burst_size,
1948 dd->tx_dma_chan,
1949 dd->rx_dma_chan,
1950 dd->tx_dma_crci,
1951 dd->rx_dma_crci,
1952 dd->stat_rx + dd->stat_dmov_rx,
1953 dd->stat_tx + dd->stat_dmov_tx,
1954 dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
1955 );
1956}
1957
1958/* Reset statistics on write */
1959static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
1960 const char *buf, size_t count)
1961{
1962	struct msm_spi *dd = spi_master_get_devdata(dev_get_drvdata(dev));
1963 dd->stat_rx = 0;
1964 dd->stat_tx = 0;
1965 dd->stat_dmov_rx = 0;
1966 dd->stat_dmov_tx = 0;
1967 dd->stat_dmov_rx_err = 0;
1968 dd->stat_dmov_tx_err = 0;
1969 return count;
1970}
1971
1972static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
1973
1974static struct attribute *dev_attrs[] = {
1975 &dev_attr_stats.attr,
1976 NULL,
1977};
1978
1979static struct attribute_group dev_attr_grp = {
1980 .attrs = dev_attrs,
1981};
1982/* ===Device attributes end=== */
1983
1984/**
1985 * spi_dmov_tx_complete_func - DataMover tx completion callback
1986 *
1987 * Executed in IRQ context (the Data Mover's IRQ), with the DataMover's
1988 * spinlock @msm_dmov_lock held.
1989 */
1990static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
1991 unsigned int result,
1992 struct msm_dmov_errdata *err)
1993{
1994 struct msm_spi *dd;
1995
1996 if (!(result & DMOV_RSLT_VALID)) {
1997		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p\n", result, cmd);
1998 return;
1999 }
2000 /* restore original context */
2001 dd = container_of(cmd, struct msm_spi, tx_hdr);
2002 if (result & DMOV_RSLT_DONE)
2003 dd->stat_dmov_tx++;
2004 else {
2005 /* Error or flush */
2006 if (result & DMOV_RSLT_ERROR) {
2007 dev_err(dd->dev, "DMA error (0x%08x)\n", result);
2008 dd->stat_dmov_tx_err++;
2009 }
2010 if (result & DMOV_RSLT_FLUSH) {
2011 /*
2012 * Flushing normally happens in process of
2013 * removing, when we are waiting for outstanding
2014 * DMA commands to be flushed.
2015 */
2016 dev_info(dd->dev,
2017 "DMA channel flushed (0x%08x)\n", result);
2018 }
2019 if (err)
2020 dev_err(dd->dev,
2021 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
2022 err->flush[0], err->flush[1], err->flush[2],
2023 err->flush[3], err->flush[4], err->flush[5]);
2024 dd->cur_msg->status = -EIO;
2025 complete(&dd->transfer_complete);
2026 }
2027}
2028
2029/**
2030 * spi_dmov_rx_complete_func - DataMover rx completion callback
2031 *
2032 * Executed in IRQ context (the Data Mover's IRQ), with the DataMover's
2033 * spinlock @msm_dmov_lock held.
2034 */
2035static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
2036 unsigned int result,
2037 struct msm_dmov_errdata *err)
2038{
2039 struct msm_spi *dd;
2040
2041 if (!(result & DMOV_RSLT_VALID)) {
2042		pr_err("Invalid DMOV result (rc = 0x%08x, cmd = %p)\n",
2043			result, cmd);
2044 return;
2045 }
2046 /* restore original context */
2047 dd = container_of(cmd, struct msm_spi, rx_hdr);
2048 if (result & DMOV_RSLT_DONE) {
2049 dd->stat_dmov_rx++;
2050 if (atomic_inc_return(&dd->rx_irq_called) == 1)
2051 return;
2052 complete(&dd->transfer_complete);
2053 } else {
2054		/* Error or flush */
2055 if (result & DMOV_RSLT_ERROR) {
2056 dev_err(dd->dev, "DMA error(0x%08x)\n", result);
2057 dd->stat_dmov_rx_err++;
2058 }
2059 if (result & DMOV_RSLT_FLUSH) {
2060 dev_info(dd->dev,
2061 "DMA channel flushed(0x%08x)\n", result);
2062 }
2063 if (err)
2064 dev_err(dd->dev,
2065 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
2066 err->flush[0], err->flush[1], err->flush[2],
2067 err->flush[3], err->flush[4], err->flush[5]);
2068 dd->cur_msg->status = -EIO;
2069 complete(&dd->transfer_complete);
2070 }
2071}
2072
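/*
 * The DMA scratch area is a single coherent chunk holding the tx and rx
 * command blocks (DM_BYTE_ALIGN aligned) followed by the tx and rx padding
 * buffers (cache-line aligned); get_chunk_size() returns its total size.
 */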
2073static inline u32 get_chunk_size(struct msm_spi *dd)
2074{
2075 u32 cache_line = dma_get_cache_alignment();
2076
2077 return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
2078 roundup(dd->burst_size, cache_line))*2;
2079}
2080
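/*
 * Flush any DataMover activity still in flight (polling for up to ~500 ms)
 * before returning the coherent command/padding chunk to the system pool.
 */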
2081static void msm_spi_teardown_dma(struct msm_spi *dd)
2082{
2083 int limit = 0;
2084
2085 if (!dd->use_dma)
2086 return;
2087
2088 while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
2089 msm_dmov_flush(dd->tx_dma_chan);
2090 msm_dmov_flush(dd->rx_dma_chan);
2091 msleep(10);
2092 }
2093
2094 dma_free_coherent(NULL, get_chunk_size(dd), dd->tx_dmov_cmd,
2095 dd->tx_dmov_cmd_dma);
2096 dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
2097 dd->tx_padding = dd->rx_padding = NULL;
2098}
2099
2100static __init int msm_spi_init_dma(struct msm_spi *dd)
2101{
2102 dmov_box *box;
2103 u32 cache_line = dma_get_cache_alignment();
2104
2105	/* Allocate everything as one chunk, since the total is smaller than a page */
2106
2107	/* Pass a NULL device: it would otherwise require a coherent_dma_mask in
2108	   the device definition; the system pool is sufficient here */
2109 dd->tx_dmov_cmd = dma_alloc_coherent(NULL, get_chunk_size(dd),
2110 &dd->tx_dmov_cmd_dma, GFP_KERNEL);
2111 if (dd->tx_dmov_cmd == NULL)
2112 return -ENOMEM;
2113
2114	/* DMA addresses should be 64-bit aligned */
2115 dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
2116 ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
2117 dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
2118 sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);
2119
2120 /* Buffers should be aligned to cache line */
2121 dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
2122 dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
2123 sizeof(struct spi_dmov_cmd), cache_line);
2124 dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding + dd->burst_size),
2125 cache_line);
2126 dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->burst_size,
2127 cache_line);
2128
2129 /* Setup DM commands */
2130 box = &(dd->rx_dmov_cmd->box);
2131 box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
2132 box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
2133 dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
2134 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
2135 offsetof(struct spi_dmov_cmd, cmd_ptr));
2136 dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;
2137 dd->rx_hdr.crci_mask = msm_dmov_build_crci_mask(1, dd->rx_dma_crci);
2138
2139 box = &(dd->tx_dmov_cmd->box);
2140 box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
2141 box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
2142 dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
2143 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
2144 offsetof(struct spi_dmov_cmd, cmd_ptr));
2145 dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;
2146 dd->tx_hdr.crci_mask = msm_dmov_build_crci_mask(1, dd->tx_dma_crci);
2147
2148 dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
2149 CMD_DST_CRCI(dd->tx_dma_crci);
2150 dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
2151 SPI_OUTPUT_FIFO;
2152 dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
2153 CMD_SRC_CRCI(dd->rx_dma_crci);
2154 dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
2155 SPI_INPUT_FIFO;
2156
2157 /* Clear remaining activities on channel */
2158 msm_dmov_flush(dd->tx_dma_chan);
2159 msm_dmov_flush(dd->rx_dma_chan);
2160
2161 return 0;
2162}
2163
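/*
 * Probe sequence: allocate the SPI master, map the controller and GSBI
 * registers, set up the optional DataMover channels and GPIOs, bring up the
 * core and interface clocks, reset the QUP/QSD core, then register the
 * master and create the sysfs and debugfs entries. When a remote spinlock
 * id is supplied, hardware access is serialized with the remote processor
 * through the remote mutex.
 */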
2164static int __init msm_spi_probe(struct platform_device *pdev)
2165{
2166 struct spi_master *master;
2167 struct msm_spi *dd;
2168 struct resource *resource;
2169 int rc = -ENXIO;
2170 int locked = 0;
2171 int i = 0;
2172 int clk_enabled = 0;
2173 int pclk_enabled = 0;
2174 struct msm_spi_platform_data *pdata = pdev->dev.platform_data;
2175
2176 master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
2177 if (!master) {
2178 rc = -ENOMEM;
2179 dev_err(&pdev->dev, "master allocation failed\n");
2180 goto err_probe_exit;
2181 }
2182
2183 master->bus_num = pdev->id;
2184 master->mode_bits = SPI_SUPPORTED_MODES;
2185 master->num_chipselect = SPI_NUM_CHIPSELECTS;
2186 master->setup = msm_spi_setup;
2187 master->transfer = msm_spi_transfer;
2188 platform_set_drvdata(pdev, master);
2189 dd = spi_master_get_devdata(master);
2190
2191 dd->pdata = pdata;
2192 rc = msm_spi_get_irq_data(dd, pdev);
2193 if (rc)
2194 goto err_probe_res;
2195 resource = platform_get_resource_byname(pdev,
2196 IORESOURCE_MEM, "spi_base");
2197 if (!resource) {
2198 rc = -ENXIO;
2199 goto err_probe_res;
2200 }
2201 dd->mem_phys_addr = resource->start;
2202 dd->mem_size = resource_size(resource);
2203
2204 rc = msm_spi_get_gsbi_resource(dd, pdev);
2205 if (rc)
2206 goto err_probe_res2;
2207
2208 if (pdata) {
2209 if (pdata->dma_config) {
2210 rc = pdata->dma_config();
2211 if (rc) {
2212 dev_warn(&pdev->dev,
2213 "%s: DM mode not supported\n",
2214 __func__);
2215 dd->use_dma = 0;
2216 goto skip_dma_resources;
2217 }
2218 }
2219 resource = platform_get_resource_byname(pdev,
2220 IORESOURCE_DMA,
2221 "spidm_channels");
2222 if (resource) {
2223 dd->rx_dma_chan = resource->start;
2224 dd->tx_dma_chan = resource->end;
2225
2226 resource = platform_get_resource_byname(pdev,
2227 IORESOURCE_DMA,
2228 "spidm_crci");
2229 if (!resource) {
2230 rc = -ENXIO;
2231 goto err_probe_res;
2232 }
2233 dd->rx_dma_crci = resource->start;
2234 dd->tx_dma_crci = resource->end;
2235 dd->use_dma = 1;
2236 master->dma_alignment = dma_get_cache_alignment();
2237 }
2238
2239skip_dma_resources:
2240 if (pdata->gpio_config) {
2241 rc = pdata->gpio_config();
2242 if (rc) {
2243 dev_err(&pdev->dev,
2244 "%s: error configuring GPIOs\n",
2245 __func__);
2246 goto err_probe_gpio;
2247 }
2248 }
2249 }
2250
2251 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
2252 resource = platform_get_resource_byname(pdev, IORESOURCE_IO,
2253 spi_rsrcs[i]);
2254 dd->spi_gpios[i] = resource ? resource->start : -1;
2255 }
2256
2257 rc = msm_spi_request_gpios(dd);
2258 if (rc)
2259 goto err_probe_gpio;
2260 spin_lock_init(&dd->queue_lock);
2261 mutex_init(&dd->core_lock);
2262 INIT_LIST_HEAD(&dd->queue);
2263 INIT_WORK(&dd->work_data, msm_spi_workq);
2264 init_waitqueue_head(&dd->continue_suspend);
2265 dd->workqueue = create_singlethread_workqueue(
2266 dev_name(master->dev.parent));
2267 if (!dd->workqueue)
2268 goto err_probe_workq;
2269
2270 if (!request_mem_region(dd->mem_phys_addr, dd->mem_size,
2271 SPI_DRV_NAME)) {
2272 rc = -ENXIO;
2273 goto err_probe_reqmem;
2274 }
2275
2276 dd->base = ioremap(dd->mem_phys_addr, dd->mem_size);
2277 if (!dd->base)
2278 goto err_probe_ioremap;
2279 rc = msm_spi_request_gsbi(dd);
2280 if (rc)
2281 goto err_probe_ioremap2;
2282 if (pdata && pdata->rsl_id) {
2283 struct remote_mutex_id rmid;
2284 rmid.r_spinlock_id = pdata->rsl_id;
2285 rmid.delay_us = SPI_TRYLOCK_DELAY;
2286
2287 rc = remote_mutex_init(&dd->r_lock, &rmid);
2288 if (rc) {
2289 dev_err(&pdev->dev, "%s: unable to init remote_mutex "
2290				"(%s), (rc=%d)\n", __func__,
2291				rmid.r_spinlock_id, rc);
2292 goto err_probe_rlock_init;
2293 }
2294 dd->use_rlock = 1;
2295 dd->pm_lat = pdata->pm_lat;
2296 pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
2297 PM_QOS_DEFAULT_VALUE);
2298 }
2299 mutex_lock(&dd->core_lock);
2300 if (dd->use_rlock)
2301 remote_mutex_lock(&dd->r_lock);
2302 locked = 1;
2303
2304 dd->dev = &pdev->dev;
Matt Wagantallac294852011-08-17 15:44:58 -07002305 dd->clk = clk_get(&pdev->dev, "core_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002306 if (IS_ERR(dd->clk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002307 dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002308 rc = PTR_ERR(dd->clk);
2309 goto err_probe_clk_get;
2310 }
2311
Matt Wagantallac294852011-08-17 15:44:58 -07002312 dd->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002313 if (IS_ERR(dd->pclk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002314 dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002315 rc = PTR_ERR(dd->pclk);
2316 goto err_probe_pclk_get;
2317 }
2318
2319 if (pdata && pdata->max_clock_speed)
2320 msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
2321
2322 rc = clk_enable(dd->clk);
2323 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002324 dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002325 __func__);
2326 goto err_probe_clk_enable;
2327 }
2328 clk_enabled = 1;
2329
2330 rc = clk_enable(dd->pclk);
2331 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002332 dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002333 __func__);
2334 goto err_probe_pclk_enable;
2335 }
2336 pclk_enabled = 1;
2337 msm_spi_init_gsbi(dd);
2338 msm_spi_calculate_fifo_size(dd);
2339 if (dd->use_dma) {
2340 rc = msm_spi_init_dma(dd);
2341 if (rc)
2342 goto err_probe_dma;
2343 }
2344
2345 /* Initialize registers */
2346 writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
2347 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
2348
2349 writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
2350 writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
2351 writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
2352 /*
2353 * The SPI core generates a bogus input overrun error on some targets,
2354 * when a transition from run to reset state occurs and if the FIFO has
2355 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
2356 * bit.
2357 */
2358 msm_spi_enable_error_flags(dd);
2359
2360 writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
2361 rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
2362 if (rc)
2363 goto err_probe_state;
2364
2365 clk_disable(dd->clk);
2366 clk_disable(dd->pclk);
2367 clk_enabled = 0;
2368 pclk_enabled = 0;
2369
2370 dd->suspended = 0;
2371 dd->transfer_pending = 0;
2372 dd->multi_xfr = 0;
2373 dd->mode = SPI_MODE_NONE;
2374
2375 rc = msm_spi_request_irq(dd, pdev->name, master);
2376 if (rc)
2377 goto err_probe_irq;
2378
2379 msm_spi_disable_irqs(dd);
2380 if (dd->use_rlock)
2381 remote_mutex_unlock(&dd->r_lock);
2382
2383 mutex_unlock(&dd->core_lock);
2384 locked = 0;
2385
2386 rc = spi_register_master(master);
2387 if (rc)
2388 goto err_probe_reg_master;
2389
2390 rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
2391 if (rc) {
2392 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
2393 goto err_attrs;
2394 }
2395
2396 spi_debugfs_init(dd);
2397
2398 return 0;
2399
2400err_attrs:
2401err_probe_reg_master:
2402 msm_spi_free_irq(dd, master);
2403err_probe_irq:
2404err_probe_state:
2405 msm_spi_teardown_dma(dd);
2406err_probe_dma:
2407 if (pclk_enabled)
2408 clk_disable(dd->pclk);
2409err_probe_pclk_enable:
2410 if (clk_enabled)
2411 clk_disable(dd->clk);
2412err_probe_clk_enable:
2413 clk_put(dd->pclk);
2414err_probe_pclk_get:
2415 clk_put(dd->clk);
2416err_probe_clk_get:
2417 if (locked) {
2418 if (dd->use_rlock)
2419 remote_mutex_unlock(&dd->r_lock);
2420 mutex_unlock(&dd->core_lock);
2421 }
2422err_probe_rlock_init:
2423 msm_spi_release_gsbi(dd);
2424err_probe_ioremap2:
2425 iounmap(dd->base);
2426err_probe_ioremap:
2427 release_mem_region(dd->mem_phys_addr, dd->mem_size);
2428err_probe_reqmem:
2429 destroy_workqueue(dd->workqueue);
2430err_probe_workq:
2431 msm_spi_free_gpios(dd);
2432err_probe_gpio:
2433 if (pdata && pdata->gpio_release)
2434 pdata->gpio_release();
2435err_probe_res2:
2436err_probe_res:
2437 spi_master_put(master);
2438err_probe_exit:
2439 return rc;
2440}
2441
2442#ifdef CONFIG_PM
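/*
 * Suspend marks the controller busy so msm_spi_transfer() rejects new
 * messages, waits for a pending transfer to drain and releases the SPI
 * GPIOs; resume reclaims the GPIOs and clears the suspended flag.
 */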
2443static int msm_spi_suspend(struct platform_device *pdev, pm_message_t state)
2444{
2445 struct spi_master *master = platform_get_drvdata(pdev);
2446 struct msm_spi *dd;
2447 unsigned long flags;
2448
2449 if (!master)
2450 goto suspend_exit;
2451 dd = spi_master_get_devdata(master);
2452 if (!dd)
2453 goto suspend_exit;
2454
2455 /* Make sure nothing is added to the queue while we're suspending */
2456 spin_lock_irqsave(&dd->queue_lock, flags);
2457 dd->suspended = 1;
2458 spin_unlock_irqrestore(&dd->queue_lock, flags);
2459
2460 /* Wait for transactions to end, or time out */
2461 wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending);
2462 msm_spi_free_gpios(dd);
2463
2464suspend_exit:
2465 return 0;
2466}
2467
2468static int msm_spi_resume(struct platform_device *pdev)
2469{
2470 struct spi_master *master = platform_get_drvdata(pdev);
2471 struct msm_spi *dd;
2472
2473 if (!master)
2474 goto resume_exit;
2475 dd = spi_master_get_devdata(master);
2476 if (!dd)
2477 goto resume_exit;
2478
2479 BUG_ON(msm_spi_request_gpios(dd) != 0);
2480 dd->suspended = 0;
2481resume_exit:
2482 return 0;
2483}
2484#else
2485#define msm_spi_suspend NULL
2486#define msm_spi_resume NULL
2487#endif /* CONFIG_PM */
2488
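/*
 * Remove undoes probe in reverse order: drop the PM QoS request, tear down
 * debugfs and sysfs, free the IRQ and DMA resources, release the GPIOs, the
 * register mapping and the GSBI, put the clocks and unregister the master.
 */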
2489static int __devexit msm_spi_remove(struct platform_device *pdev)
2490{
2491 struct spi_master *master = platform_get_drvdata(pdev);
2492 struct msm_spi *dd = spi_master_get_devdata(master);
2493 struct msm_spi_platform_data *pdata = pdev->dev.platform_data;
2494
2495 pm_qos_remove_request(&qos_req_list);
2496 spi_debugfs_exit(dd);
2497 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
2498
2499 msm_spi_free_irq(dd, master);
2500 msm_spi_teardown_dma(dd);
2501
2502 if (pdata && pdata->gpio_release)
2503 pdata->gpio_release();
2504
2505 msm_spi_free_gpios(dd);
2506 iounmap(dd->base);
2507 release_mem_region(dd->mem_phys_addr, dd->mem_size);
2508 msm_spi_release_gsbi(dd);
2509 clk_put(dd->clk);
2510 clk_put(dd->pclk);
2511 destroy_workqueue(dd->workqueue);
2512	platform_set_drvdata(pdev, NULL);
2513 spi_unregister_master(master);
2514 spi_master_put(master);
2515
2516 return 0;
2517}
2518
2519static struct platform_driver msm_spi_driver = {
2520 .driver = {
2521 .name = SPI_DRV_NAME,
2522 .owner = THIS_MODULE,
2523 },
2524 .suspend = msm_spi_suspend,
2525 .resume = msm_spi_resume,
2526	.remove = __devexit_p(msm_spi_remove),
2527};
2528
2529static int __init msm_spi_init(void)
2530{
2531 return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
2532}
2533module_init(msm_spi_init);
2534
2535static void __exit msm_spi_exit(void)
2536{
2537 platform_driver_unregister(&msm_spi_driver);
2538}
2539module_exit(msm_spi_exit);