blob: a8bbdd1c4314f14fe77f33d7b14aef60117ea323 [file] [log] [blame]
/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070011#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/init.h>
14#include <linux/ioport.h>
15#include <linux/device.h>
16#include <linux/interrupt.h>
Russell King613b1522011-01-30 21:06:53 +000017#include <linux/kernel.h>
Lee Jones000bc9d2012-04-16 10:18:43 +010018#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include <linux/delay.h>
20#include <linux/err.h>
21#include <linux/highmem.h>
Nicolas Pitre019a5f52007-10-11 01:06:03 -040022#include <linux/log2.h>
Ulf Hansson70be2082013-01-07 15:35:06 +010023#include <linux/mmc/pm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/mmc/host.h>
Linus Walleij34177802010-10-19 12:43:58 +010025#include <linux/mmc/card.h>
Russell Kinga62c80e2006-01-07 13:52:45 +000026#include <linux/amba/bus.h>
Russell Kingf8ce2542006-01-07 16:15:52 +000027#include <linux/clk.h>
Jens Axboebd6dee62007-10-24 09:01:09 +020028#include <linux/scatterlist.h>
Russell King89001442009-07-09 15:16:07 +010029#include <linux/gpio.h>
Lee Jones9a597012012-04-12 16:51:13 +010030#include <linux/of_gpio.h>
Linus Walleij34e84f32009-09-22 14:41:40 +010031#include <linux/regulator/consumer.h>
Russell Kingc8ebae32011-01-11 19:35:53 +000032#include <linux/dmaengine.h>
33#include <linux/dma-mapping.h>
34#include <linux/amba/mmci.h>
Russell King1c3be362011-08-14 09:17:05 +010035#include <linux/pm_runtime.h>
Viresh Kumar258aea72012-02-01 16:12:19 +053036#include <linux/types.h>
Linus Walleija9a83782012-10-29 14:39:30 +010037#include <linux/pinctrl/consumer.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070038
Russell King7b09cda2005-07-01 12:02:59 +010039#include <asm/div64.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070040#include <asm/io.h>
Russell Kingc6b8fda2005-10-28 14:05:16 +010041#include <asm/sizes.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042
43#include "mmci.h"
44
#define DRIVER_NAME "mmci-pl18x"

/* Default maximum operating frequency (Hz); overridable as a module param. */
static unsigned int fmax = 515633;
48
Rabin Vincent4956e102010-07-21 12:54:40 +010049/**
50 * struct variant_data - MMCI variant-specific quirks
51 * @clkreg: default value for MCICLOCK register
Rabin Vincent4380c142010-07-21 12:55:18 +010052 * @clkreg_enable: enable value for MMCICLOCK register
Rabin Vincent08458ef2010-07-21 12:55:59 +010053 * @datalength_bits: number of bits in the MMCIDATALENGTH register
Rabin Vincent8301bb62010-08-09 12:57:30 +010054 * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
55 * is asserted (likewise for RX)
56 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
57 * is asserted (likewise for RX)
Linus Walleij34177802010-10-19 12:43:58 +010058 * @sdio: variant supports SDIO
Linus Walleijb70a67f2010-12-06 09:24:14 +010059 * @st_clkdiv: true if using a ST-specific clock divider algorithm
Philippe Langlais1784b152011-03-25 08:51:52 +010060 * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
Ulf Hansson7d72a1d2011-12-13 16:54:55 +010061 * @pwrreg_powerup: power up value for MMCIPOWER register
Ulf Hansson4d1a3a02011-12-13 16:57:07 +010062 * @signal_direction: input/out direction of bus signals can be indicated
Ulf Hanssonf4670da2013-01-09 17:19:54 +010063 * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
Rabin Vincent4956e102010-07-21 12:54:40 +010064 */
65struct variant_data {
66 unsigned int clkreg;
Rabin Vincent4380c142010-07-21 12:55:18 +010067 unsigned int clkreg_enable;
Rabin Vincent08458ef2010-07-21 12:55:59 +010068 unsigned int datalength_bits;
Rabin Vincent8301bb62010-08-09 12:57:30 +010069 unsigned int fifosize;
70 unsigned int fifohalfsize;
Linus Walleij34177802010-10-19 12:43:58 +010071 bool sdio;
Linus Walleijb70a67f2010-12-06 09:24:14 +010072 bool st_clkdiv;
Philippe Langlais1784b152011-03-25 08:51:52 +010073 bool blksz_datactrl16;
Ulf Hansson7d72a1d2011-12-13 16:54:55 +010074 u32 pwrreg_powerup;
Ulf Hansson4d1a3a02011-12-13 16:57:07 +010075 bool signal_direction;
Ulf Hanssonf4670da2013-01-09 17:19:54 +010076 bool pwrreg_clkgate;
Rabin Vincent4956e102010-07-21 12:54:40 +010077};
78
79static struct variant_data variant_arm = {
Rabin Vincent8301bb62010-08-09 12:57:30 +010080 .fifosize = 16 * 4,
81 .fifohalfsize = 8 * 4,
Rabin Vincent08458ef2010-07-21 12:55:59 +010082 .datalength_bits = 16,
Ulf Hansson7d72a1d2011-12-13 16:54:55 +010083 .pwrreg_powerup = MCI_PWR_UP,
Rabin Vincent4956e102010-07-21 12:54:40 +010084};
85
Pawel Moll768fbc12011-03-11 17:18:07 +000086static struct variant_data variant_arm_extended_fifo = {
87 .fifosize = 128 * 4,
88 .fifohalfsize = 64 * 4,
89 .datalength_bits = 16,
Ulf Hansson7d72a1d2011-12-13 16:54:55 +010090 .pwrreg_powerup = MCI_PWR_UP,
Pawel Moll768fbc12011-03-11 17:18:07 +000091};
92
Pawel Moll3a372982013-01-24 14:12:45 +010093static struct variant_data variant_arm_extended_fifo_hwfc = {
94 .fifosize = 128 * 4,
95 .fifohalfsize = 64 * 4,
96 .clkreg_enable = MCI_ARM_HWFCEN,
97 .datalength_bits = 16,
98 .pwrreg_powerup = MCI_PWR_UP,
99};
100
Rabin Vincent4956e102010-07-21 12:54:40 +0100101static struct variant_data variant_u300 = {
Rabin Vincent8301bb62010-08-09 12:57:30 +0100102 .fifosize = 16 * 4,
103 .fifohalfsize = 8 * 4,
Linus Walleij49ac2152011-03-04 14:54:16 +0100104 .clkreg_enable = MCI_ST_U300_HWFCEN,
Rabin Vincent08458ef2010-07-21 12:55:59 +0100105 .datalength_bits = 16,
Linus Walleij34177802010-10-19 12:43:58 +0100106 .sdio = true,
Ulf Hansson7d72a1d2011-12-13 16:54:55 +0100107 .pwrreg_powerup = MCI_PWR_ON,
Ulf Hansson4d1a3a02011-12-13 16:57:07 +0100108 .signal_direction = true,
Ulf Hanssonf4670da2013-01-09 17:19:54 +0100109 .pwrreg_clkgate = true,
Rabin Vincent4956e102010-07-21 12:54:40 +0100110};
111
Linus Walleij34fd4212012-04-10 17:43:59 +0100112static struct variant_data variant_nomadik = {
113 .fifosize = 16 * 4,
114 .fifohalfsize = 8 * 4,
115 .clkreg = MCI_CLK_ENABLE,
116 .datalength_bits = 24,
117 .sdio = true,
118 .st_clkdiv = true,
119 .pwrreg_powerup = MCI_PWR_ON,
120 .signal_direction = true,
Ulf Hanssonf4670da2013-01-09 17:19:54 +0100121 .pwrreg_clkgate = true,
Linus Walleij34fd4212012-04-10 17:43:59 +0100122};
123
Rabin Vincent4956e102010-07-21 12:54:40 +0100124static struct variant_data variant_ux500 = {
Rabin Vincent8301bb62010-08-09 12:57:30 +0100125 .fifosize = 30 * 4,
126 .fifohalfsize = 8 * 4,
Rabin Vincent4956e102010-07-21 12:54:40 +0100127 .clkreg = MCI_CLK_ENABLE,
Linus Walleij49ac2152011-03-04 14:54:16 +0100128 .clkreg_enable = MCI_ST_UX500_HWFCEN,
Rabin Vincent08458ef2010-07-21 12:55:59 +0100129 .datalength_bits = 24,
Linus Walleij34177802010-10-19 12:43:58 +0100130 .sdio = true,
Linus Walleijb70a67f2010-12-06 09:24:14 +0100131 .st_clkdiv = true,
Ulf Hansson7d72a1d2011-12-13 16:54:55 +0100132 .pwrreg_powerup = MCI_PWR_ON,
Ulf Hansson4d1a3a02011-12-13 16:57:07 +0100133 .signal_direction = true,
Ulf Hanssonf4670da2013-01-09 17:19:54 +0100134 .pwrreg_clkgate = true,
Rabin Vincent4956e102010-07-21 12:54:40 +0100135};
Linus Walleijb70a67f2010-12-06 09:24:14 +0100136
Philippe Langlais1784b152011-03-25 08:51:52 +0100137static struct variant_data variant_ux500v2 = {
138 .fifosize = 30 * 4,
139 .fifohalfsize = 8 * 4,
140 .clkreg = MCI_CLK_ENABLE,
141 .clkreg_enable = MCI_ST_UX500_HWFCEN,
142 .datalength_bits = 24,
143 .sdio = true,
144 .st_clkdiv = true,
145 .blksz_datactrl16 = true,
Ulf Hansson7d72a1d2011-12-13 16:54:55 +0100146 .pwrreg_powerup = MCI_PWR_ON,
Ulf Hansson4d1a3a02011-12-13 16:57:07 +0100147 .signal_direction = true,
Ulf Hanssonf4670da2013-01-09 17:19:54 +0100148 .pwrreg_clkgate = true,
Philippe Langlais1784b152011-03-25 08:51:52 +0100149};
150
Linus Walleija6a64642009-09-14 12:56:14 +0100151/*
Ulf Hansson653a7612013-01-21 21:29:34 +0100152 * Validate mmc prerequisites
153 */
154static int mmci_validate_data(struct mmci_host *host,
155 struct mmc_data *data)
156{
157 if (!data)
158 return 0;
159
160 if (!is_power_of_2(data->blksz)) {
161 dev_err(mmc_dev(host->mmc),
162 "unsupported block size (%d bytes)\n", data->blksz);
163 return -EINVAL;
164 }
165
166 return 0;
167}
168
169/*
Linus Walleija6a64642009-09-14 12:56:14 +0100170 * This must be called with host->lock held
171 */
Ulf Hansson7437cfa2012-01-18 09:17:27 +0100172static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
173{
174 if (host->clk_reg != clk) {
175 host->clk_reg = clk;
176 writel(clk, host->base + MMCICLOCK);
177 }
178}
179
180/*
181 * This must be called with host->lock held
182 */
183static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
184{
185 if (host->pwr_reg != pwr) {
186 host->pwr_reg = pwr;
187 writel(pwr, host->base + MMCIPOWER);
188 }
189}
190
191/*
192 * This must be called with host->lock held
193 */
Linus Walleija6a64642009-09-14 12:56:14 +0100194static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
195{
Rabin Vincent4956e102010-07-21 12:54:40 +0100196 struct variant_data *variant = host->variant;
197 u32 clk = variant->clkreg;
Linus Walleija6a64642009-09-14 12:56:14 +0100198
Ulf Hanssonc58a8502013-05-13 15:40:03 +0100199 /* Make sure cclk reflects the current calculated clock */
200 host->cclk = 0;
201
Linus Walleija6a64642009-09-14 12:56:14 +0100202 if (desired) {
203 if (desired >= host->mclk) {
Linus Walleij991a86e2010-12-10 09:35:53 +0100204 clk = MCI_CLK_BYPASS;
Linus Walleij399bc482011-04-01 07:59:17 +0100205 if (variant->st_clkdiv)
206 clk |= MCI_ST_UX500_NEG_EDGE;
Linus Walleija6a64642009-09-14 12:56:14 +0100207 host->cclk = host->mclk;
Linus Walleijb70a67f2010-12-06 09:24:14 +0100208 } else if (variant->st_clkdiv) {
209 /*
210 * DB8500 TRM says f = mclk / (clkdiv + 2)
211 * => clkdiv = (mclk / f) - 2
212 * Round the divider up so we don't exceed the max
213 * frequency
214 */
215 clk = DIV_ROUND_UP(host->mclk, desired) - 2;
216 if (clk >= 256)
217 clk = 255;
218 host->cclk = host->mclk / (clk + 2);
Linus Walleija6a64642009-09-14 12:56:14 +0100219 } else {
Linus Walleijb70a67f2010-12-06 09:24:14 +0100220 /*
221 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
222 * => clkdiv = mclk / (2 * f) - 1
223 */
Linus Walleija6a64642009-09-14 12:56:14 +0100224 clk = host->mclk / (2 * desired) - 1;
225 if (clk >= 256)
226 clk = 255;
227 host->cclk = host->mclk / (2 * (clk + 1));
228 }
Rabin Vincent4380c142010-07-21 12:55:18 +0100229
230 clk |= variant->clkreg_enable;
Linus Walleija6a64642009-09-14 12:56:14 +0100231 clk |= MCI_CLK_ENABLE;
232 /* This hasn't proven to be worthwhile */
233 /* clk |= MCI_CLK_PWRSAVE; */
234 }
235
Ulf Hanssonc58a8502013-05-13 15:40:03 +0100236 /* Set actual clock for debug */
237 host->mmc->actual_clock = host->cclk;
238
Linus Walleij9e6c82c2009-09-14 12:57:11 +0100239 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
Linus Walleij771dc152010-04-08 07:38:52 +0100240 clk |= MCI_4BIT_BUS;
241 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
242 clk |= MCI_ST_8BIT_BUS;
Linus Walleij9e6c82c2009-09-14 12:57:11 +0100243
Ulf Hansson6dbb6ee2013-01-07 15:30:44 +0100244 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
245 clk |= MCI_ST_UX500_NEG_EDGE;
246
Ulf Hansson7437cfa2012-01-18 09:17:27 +0100247 mmci_write_clkreg(host, clk);
Linus Walleija6a64642009-09-14 12:56:14 +0100248}
249
Linus Torvalds1da177e2005-04-16 15:20:36 -0700250static void
251mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
252{
253 writel(0, host->base + MMCICOMMAND);
254
Russell Kinge47c2222007-01-08 16:42:51 +0000255 BUG_ON(host->data);
256
Linus Torvalds1da177e2005-04-16 15:20:36 -0700257 host->mrq = NULL;
258 host->cmd = NULL;
259
Linus Torvalds1da177e2005-04-16 15:20:36 -0700260 mmc_request_done(host->mmc, mrq);
Ulf Hansson2cd976c2011-12-13 17:01:11 +0100261
262 pm_runtime_mark_last_busy(mmc_dev(host->mmc));
263 pm_runtime_put_autosuspend(mmc_dev(host->mmc));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700264}
265
Linus Walleij2686b4b2010-10-19 12:39:48 +0100266static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
267{
268 void __iomem *base = host->base;
269
270 if (host->singleirq) {
271 unsigned int mask0 = readl(base + MMCIMASK0);
272
273 mask0 &= ~MCI_IRQ1MASK;
274 mask0 |= mask;
275
276 writel(mask0, base + MMCIMASK0);
277 }
278
279 writel(mask, base + MMCIMASK1);
280}
281
Linus Torvalds1da177e2005-04-16 15:20:36 -0700282static void mmci_stop_data(struct mmci_host *host)
283{
284 writel(0, host->base + MMCIDATACTRL);
Linus Walleij2686b4b2010-10-19 12:39:48 +0100285 mmci_set_mask1(host, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700286 host->data = NULL;
287}
288
Rabin Vincent4ce1d6c2010-07-21 12:44:58 +0100289static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
290{
291 unsigned int flags = SG_MITER_ATOMIC;
292
293 if (data->flags & MMC_DATA_READ)
294 flags |= SG_MITER_TO_SG;
295 else
296 flags |= SG_MITER_FROM_SG;
297
298 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
299}
300
Russell Kingc8ebae32011-01-11 19:35:53 +0000301/*
302 * All the DMA operation mode stuff goes inside this ifdef.
303 * This assumes that you have a generic DMA device interface,
304 * no custom DMA interfaces are supported.
305 */
306#ifdef CONFIG_DMA_ENGINE
Bill Pembertonc3be1ef2012-11-19 13:23:06 -0500307static void mmci_dma_setup(struct mmci_host *host)
Russell Kingc8ebae32011-01-11 19:35:53 +0000308{
309 struct mmci_platform_data *plat = host->plat;
310 const char *rxname, *txname;
311 dma_cap_mask_t mask;
312
Lee Jones1fd83f02013-05-03 12:51:17 +0100313 host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
314 host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
Russell Kingc8ebae32011-01-11 19:35:53 +0000315
Per Forlin58c7ccb2011-07-01 18:55:24 +0200316 /* initialize pre request cookie */
317 host->next_data.cookie = 1;
318
Russell Kingc8ebae32011-01-11 19:35:53 +0000319 /* Try to acquire a generic DMA engine slave channel */
320 dma_cap_zero(mask);
321 dma_cap_set(DMA_SLAVE, mask);
322
Lee Jones1fd83f02013-05-03 12:51:17 +0100323 if (plat && plat->dma_filter) {
324 if (!host->dma_rx_channel && plat->dma_rx_param) {
325 host->dma_rx_channel = dma_request_channel(mask,
326 plat->dma_filter,
327 plat->dma_rx_param);
328 /* E.g if no DMA hardware is present */
329 if (!host->dma_rx_channel)
330 dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
331 }
332
333 if (!host->dma_tx_channel && plat->dma_tx_param) {
334 host->dma_tx_channel = dma_request_channel(mask,
335 plat->dma_filter,
336 plat->dma_tx_param);
337 if (!host->dma_tx_channel)
338 dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
339 }
340 }
341
Russell Kingc8ebae32011-01-11 19:35:53 +0000342 /*
343 * If only an RX channel is specified, the driver will
344 * attempt to use it bidirectionally, however if it is
345 * is specified but cannot be located, DMA will be disabled.
346 */
Lee Jones1fd83f02013-05-03 12:51:17 +0100347 if (host->dma_rx_channel && !host->dma_tx_channel)
Russell Kingc8ebae32011-01-11 19:35:53 +0000348 host->dma_tx_channel = host->dma_rx_channel;
Russell Kingc8ebae32011-01-11 19:35:53 +0000349
350 if (host->dma_rx_channel)
351 rxname = dma_chan_name(host->dma_rx_channel);
352 else
353 rxname = "none";
354
355 if (host->dma_tx_channel)
356 txname = dma_chan_name(host->dma_tx_channel);
357 else
358 txname = "none";
359
360 dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
361 rxname, txname);
362
363 /*
364 * Limit the maximum segment size in any SG entry according to
365 * the parameters of the DMA engine device.
366 */
367 if (host->dma_tx_channel) {
368 struct device *dev = host->dma_tx_channel->device->dev;
369 unsigned int max_seg_size = dma_get_max_seg_size(dev);
370
371 if (max_seg_size < host->mmc->max_seg_size)
372 host->mmc->max_seg_size = max_seg_size;
373 }
374 if (host->dma_rx_channel) {
375 struct device *dev = host->dma_rx_channel->device->dev;
376 unsigned int max_seg_size = dma_get_max_seg_size(dev);
377
378 if (max_seg_size < host->mmc->max_seg_size)
379 host->mmc->max_seg_size = max_seg_size;
380 }
381}
382
383/*
Bill Pemberton6e0ee712012-11-19 13:26:03 -0500384 * This is used in or so inline it
Russell Kingc8ebae32011-01-11 19:35:53 +0000385 * so it can be discarded.
386 */
387static inline void mmci_dma_release(struct mmci_host *host)
388{
389 struct mmci_platform_data *plat = host->plat;
390
391 if (host->dma_rx_channel)
392 dma_release_channel(host->dma_rx_channel);
393 if (host->dma_tx_channel && plat->dma_tx_param)
394 dma_release_channel(host->dma_tx_channel);
395 host->dma_rx_channel = host->dma_tx_channel = NULL;
396}
397
Ulf Hansson653a7612013-01-21 21:29:34 +0100398static void mmci_dma_data_error(struct mmci_host *host)
399{
400 dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
401 dmaengine_terminate_all(host->dma_current);
402 host->dma_current = NULL;
403 host->dma_desc_current = NULL;
404 host->data->host_cookie = 0;
405}
406
Russell Kingc8ebae32011-01-11 19:35:53 +0000407static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
408{
Ulf Hansson653a7612013-01-21 21:29:34 +0100409 struct dma_chan *chan;
Russell Kingc8ebae32011-01-11 19:35:53 +0000410 enum dma_data_direction dir;
Ulf Hansson653a7612013-01-21 21:29:34 +0100411
412 if (data->flags & MMC_DATA_READ) {
413 dir = DMA_FROM_DEVICE;
414 chan = host->dma_rx_channel;
415 } else {
416 dir = DMA_TO_DEVICE;
417 chan = host->dma_tx_channel;
418 }
419
420 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
421}
422
423static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
424{
Russell Kingc8ebae32011-01-11 19:35:53 +0000425 u32 status;
426 int i;
427
428 /* Wait up to 1ms for the DMA to complete */
429 for (i = 0; ; i++) {
430 status = readl(host->base + MMCISTATUS);
431 if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
432 break;
433 udelay(10);
434 }
435
436 /*
437 * Check to see whether we still have some data left in the FIFO -
438 * this catches DMA controllers which are unable to monitor the
439 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
440 * contiguous buffers. On TX, we'll get a FIFO underrun error.
441 */
442 if (status & MCI_RXDATAAVLBLMASK) {
Ulf Hansson653a7612013-01-21 21:29:34 +0100443 mmci_dma_data_error(host);
Russell Kingc8ebae32011-01-11 19:35:53 +0000444 if (!data->error)
445 data->error = -EIO;
446 }
447
Per Forlin58c7ccb2011-07-01 18:55:24 +0200448 if (!data->host_cookie)
Ulf Hansson653a7612013-01-21 21:29:34 +0100449 mmci_dma_unmap(host, data);
Russell Kingc8ebae32011-01-11 19:35:53 +0000450
451 /*
452 * Use of DMA with scatter-gather is impossible.
453 * Give up with DMA and switch back to PIO mode.
454 */
455 if (status & MCI_RXDATAAVLBLMASK) {
456 dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
457 mmci_dma_release(host);
458 }
Ulf Hansson653a7612013-01-21 21:29:34 +0100459
460 host->dma_current = NULL;
461 host->dma_desc_current = NULL;
Russell Kingc8ebae32011-01-11 19:35:53 +0000462}
463
Ulf Hansson653a7612013-01-21 21:29:34 +0100464/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
465static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
466 struct dma_chan **dma_chan,
467 struct dma_async_tx_descriptor **dma_desc)
Russell Kingc8ebae32011-01-11 19:35:53 +0000468{
469 struct variant_data *variant = host->variant;
470 struct dma_slave_config conf = {
471 .src_addr = host->phybase + MMCIFIFO,
472 .dst_addr = host->phybase + MMCIFIFO,
473 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
474 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
475 .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
476 .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
Viresh Kumar258aea72012-02-01 16:12:19 +0530477 .device_fc = false,
Russell Kingc8ebae32011-01-11 19:35:53 +0000478 };
Russell Kingc8ebae32011-01-11 19:35:53 +0000479 struct dma_chan *chan;
480 struct dma_device *device;
481 struct dma_async_tx_descriptor *desc;
Vinod Koul05f57992011-10-14 10:45:11 +0530482 enum dma_data_direction buffer_dirn;
Russell Kingc8ebae32011-01-11 19:35:53 +0000483 int nr_sg;
484
Russell Kingc8ebae32011-01-11 19:35:53 +0000485 if (data->flags & MMC_DATA_READ) {
Vinod Koul05f57992011-10-14 10:45:11 +0530486 conf.direction = DMA_DEV_TO_MEM;
487 buffer_dirn = DMA_FROM_DEVICE;
Russell Kingc8ebae32011-01-11 19:35:53 +0000488 chan = host->dma_rx_channel;
489 } else {
Vinod Koul05f57992011-10-14 10:45:11 +0530490 conf.direction = DMA_MEM_TO_DEV;
491 buffer_dirn = DMA_TO_DEVICE;
Russell Kingc8ebae32011-01-11 19:35:53 +0000492 chan = host->dma_tx_channel;
493 }
494
495 /* If there's no DMA channel, fall back to PIO */
496 if (!chan)
497 return -EINVAL;
498
499 /* If less than or equal to the fifo size, don't bother with DMA */
Per Forlin58c7ccb2011-07-01 18:55:24 +0200500 if (data->blksz * data->blocks <= variant->fifosize)
Russell Kingc8ebae32011-01-11 19:35:53 +0000501 return -EINVAL;
502
503 device = chan->device;
Vinod Koul05f57992011-10-14 10:45:11 +0530504 nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
Russell Kingc8ebae32011-01-11 19:35:53 +0000505 if (nr_sg == 0)
506 return -EINVAL;
507
508 dmaengine_slave_config(chan, &conf);
Alexandre Bounine16052822012-03-08 16:11:18 -0500509 desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
Russell Kingc8ebae32011-01-11 19:35:53 +0000510 conf.direction, DMA_CTRL_ACK);
511 if (!desc)
512 goto unmap_exit;
513
Ulf Hansson653a7612013-01-21 21:29:34 +0100514 *dma_chan = chan;
515 *dma_desc = desc;
Russell Kingc8ebae32011-01-11 19:35:53 +0000516
Per Forlin58c7ccb2011-07-01 18:55:24 +0200517 return 0;
518
519 unmap_exit:
Vinod Koul05f57992011-10-14 10:45:11 +0530520 dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
Per Forlin58c7ccb2011-07-01 18:55:24 +0200521 return -ENOMEM;
522}
523
Ulf Hansson653a7612013-01-21 21:29:34 +0100524static inline int mmci_dma_prep_data(struct mmci_host *host,
525 struct mmc_data *data)
526{
527 /* Check if next job is already prepared. */
528 if (host->dma_current && host->dma_desc_current)
529 return 0;
530
531 /* No job were prepared thus do it now. */
532 return __mmci_dma_prep_data(host, data, &host->dma_current,
533 &host->dma_desc_current);
534}
535
536static inline int mmci_dma_prep_next(struct mmci_host *host,
537 struct mmc_data *data)
538{
539 struct mmci_host_next *nd = &host->next_data;
540 return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
541}
542
Per Forlin58c7ccb2011-07-01 18:55:24 +0200543static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
544{
545 int ret;
546 struct mmc_data *data = host->data;
547
Ulf Hansson653a7612013-01-21 21:29:34 +0100548 ret = mmci_dma_prep_data(host, host->data);
Per Forlin58c7ccb2011-07-01 18:55:24 +0200549 if (ret)
550 return ret;
551
552 /* Okay, go for it. */
Russell Kingc8ebae32011-01-11 19:35:53 +0000553 dev_vdbg(mmc_dev(host->mmc),
554 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
555 data->sg_len, data->blksz, data->blocks, data->flags);
Per Forlin58c7ccb2011-07-01 18:55:24 +0200556 dmaengine_submit(host->dma_desc_current);
557 dma_async_issue_pending(host->dma_current);
Russell Kingc8ebae32011-01-11 19:35:53 +0000558
559 datactrl |= MCI_DPSM_DMAENABLE;
560
561 /* Trigger the DMA transfer */
562 writel(datactrl, host->base + MMCIDATACTRL);
563
564 /*
565 * Let the MMCI say when the data is ended and it's time
566 * to fire next DMA request. When that happens, MMCI will
567 * call mmci_data_end()
568 */
569 writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
570 host->base + MMCIMASK0);
571 return 0;
Russell Kingc8ebae32011-01-11 19:35:53 +0000572}
Per Forlin58c7ccb2011-07-01 18:55:24 +0200573
574static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
575{
576 struct mmci_host_next *next = &host->next_data;
577
Ulf Hansson653a7612013-01-21 21:29:34 +0100578 WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
579 WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));
Per Forlin58c7ccb2011-07-01 18:55:24 +0200580
581 host->dma_desc_current = next->dma_desc;
582 host->dma_current = next->dma_chan;
Per Forlin58c7ccb2011-07-01 18:55:24 +0200583 next->dma_desc = NULL;
584 next->dma_chan = NULL;
585}
586
587static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
588 bool is_first_req)
589{
590 struct mmci_host *host = mmc_priv(mmc);
591 struct mmc_data *data = mrq->data;
592 struct mmci_host_next *nd = &host->next_data;
593
594 if (!data)
595 return;
596
Ulf Hansson653a7612013-01-21 21:29:34 +0100597 BUG_ON(data->host_cookie);
Per Forlin58c7ccb2011-07-01 18:55:24 +0200598
Ulf Hansson653a7612013-01-21 21:29:34 +0100599 if (mmci_validate_data(host, data))
600 return;
601
602 if (!mmci_dma_prep_next(host, data))
603 data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
Per Forlin58c7ccb2011-07-01 18:55:24 +0200604}
605
606static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
607 int err)
608{
609 struct mmci_host *host = mmc_priv(mmc);
610 struct mmc_data *data = mrq->data;
Per Forlin58c7ccb2011-07-01 18:55:24 +0200611
Ulf Hansson653a7612013-01-21 21:29:34 +0100612 if (!data || !data->host_cookie)
Per Forlin58c7ccb2011-07-01 18:55:24 +0200613 return;
614
Ulf Hansson653a7612013-01-21 21:29:34 +0100615 mmci_dma_unmap(host, data);
Per Forlin58c7ccb2011-07-01 18:55:24 +0200616
Ulf Hansson653a7612013-01-21 21:29:34 +0100617 if (err) {
618 struct mmci_host_next *next = &host->next_data;
619 struct dma_chan *chan;
620 if (data->flags & MMC_DATA_READ)
621 chan = host->dma_rx_channel;
622 else
623 chan = host->dma_tx_channel;
624 dmaengine_terminate_all(chan);
Per Forlin58c7ccb2011-07-01 18:55:24 +0200625
Ulf Hansson653a7612013-01-21 21:29:34 +0100626 next->dma_desc = NULL;
627 next->dma_chan = NULL;
Per Forlin58c7ccb2011-07-01 18:55:24 +0200628 }
629}
630
Russell Kingc8ebae32011-01-11 19:35:53 +0000631#else
632/* Blank functions if the DMA engine is not available */
Per Forlin58c7ccb2011-07-01 18:55:24 +0200633static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
634{
635}
Russell Kingc8ebae32011-01-11 19:35:53 +0000636static inline void mmci_dma_setup(struct mmci_host *host)
637{
638}
639
640static inline void mmci_dma_release(struct mmci_host *host)
641{
642}
643
644static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
645{
646}
647
Ulf Hansson653a7612013-01-21 21:29:34 +0100648static inline void mmci_dma_finalize(struct mmci_host *host,
649 struct mmc_data *data)
650{
651}
652
Russell Kingc8ebae32011-01-11 19:35:53 +0000653static inline void mmci_dma_data_error(struct mmci_host *host)
654{
655}
656
657static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
658{
659 return -ENOSYS;
660}
Per Forlin58c7ccb2011-07-01 18:55:24 +0200661
662#define mmci_pre_request NULL
663#define mmci_post_request NULL
664
Russell Kingc8ebae32011-01-11 19:35:53 +0000665#endif
666
Linus Torvalds1da177e2005-04-16 15:20:36 -0700667static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
668{
Rabin Vincent8301bb62010-08-09 12:57:30 +0100669 struct variant_data *variant = host->variant;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700670 unsigned int datactrl, timeout, irqmask;
Russell King7b09cda2005-07-01 12:02:59 +0100671 unsigned long long clks;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700672 void __iomem *base;
Russell King3bc87f22006-08-27 13:51:28 +0100673 int blksz_bits;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700674
Linus Walleij64de0282010-02-19 01:09:10 +0100675 dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
676 data->blksz, data->blocks, data->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700677
678 host->data = data;
Rabin Vincent528320d2010-07-21 12:49:49 +0100679 host->size = data->blksz * data->blocks;
Russell King51d43752011-01-27 10:56:52 +0000680 data->bytes_xfered = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700681
Russell King7b09cda2005-07-01 12:02:59 +0100682 clks = (unsigned long long)data->timeout_ns * host->cclk;
683 do_div(clks, 1000000000UL);
684
685 timeout = data->timeout_clks + (unsigned int)clks;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700686
687 base = host->base;
688 writel(timeout, base + MMCIDATATIMER);
689 writel(host->size, base + MMCIDATALENGTH);
690
Russell King3bc87f22006-08-27 13:51:28 +0100691 blksz_bits = ffs(data->blksz) - 1;
692 BUG_ON(1 << blksz_bits != data->blksz);
693
Philippe Langlais1784b152011-03-25 08:51:52 +0100694 if (variant->blksz_datactrl16)
695 datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
696 else
697 datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
Russell Kingc8ebae32011-01-11 19:35:53 +0000698
699 if (data->flags & MMC_DATA_READ)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700700 datactrl |= MCI_DPSM_DIRECTION;
Russell Kingc8ebae32011-01-11 19:35:53 +0000701
Ulf Hansson7258db72011-12-13 17:05:28 +0100702 /* The ST Micro variants has a special bit to enable SDIO */
703 if (variant->sdio && host->mmc->card)
Ulf Hansson06c1a122012-10-12 14:01:50 +0100704 if (mmc_card_sdio(host->mmc->card)) {
705 /*
706 * The ST Micro variants has a special bit
707 * to enable SDIO.
708 */
709 u32 clk;
710
Ulf Hansson7258db72011-12-13 17:05:28 +0100711 datactrl |= MCI_ST_DPSM_SDIOEN;
712
Ulf Hansson06c1a122012-10-12 14:01:50 +0100713 /*
Ulf Hansson70ac0932012-10-12 14:07:36 +0100714 * The ST Micro variant for SDIO small write transfers
715 * needs to have clock H/W flow control disabled,
716 * otherwise the transfer will not start. The threshold
717 * depends on the rate of MCLK.
Ulf Hansson06c1a122012-10-12 14:01:50 +0100718 */
Ulf Hansson70ac0932012-10-12 14:07:36 +0100719 if (data->flags & MMC_DATA_WRITE &&
720 (host->size < 8 ||
721 (host->size <= 8 && host->mclk > 50000000)))
Ulf Hansson06c1a122012-10-12 14:01:50 +0100722 clk = host->clk_reg & ~variant->clkreg_enable;
723 else
724 clk = host->clk_reg | variant->clkreg_enable;
725
726 mmci_write_clkreg(host, clk);
727 }
728
Ulf Hansson6dbb6ee2013-01-07 15:30:44 +0100729 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
730 datactrl |= MCI_ST_DPSM_DDRMODE;
731
Russell Kingc8ebae32011-01-11 19:35:53 +0000732 /*
733 * Attempt to use DMA operation mode, if this
734 * should fail, fall back to PIO mode
735 */
736 if (!mmci_dma_start_data(host, datactrl))
737 return;
738
739 /* IRQ mode, map the SG list for CPU reading/writing */
740 mmci_init_sg(host, data);
741
742 if (data->flags & MMC_DATA_READ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700743 irqmask = MCI_RXFIFOHALFFULLMASK;
Russell King0425a142006-02-16 16:48:31 +0000744
745 /*
Russell Kingc4d877c2011-01-27 09:50:13 +0000746 * If we have less than the fifo 'half-full' threshold to
747 * transfer, trigger a PIO interrupt as soon as any data
748 * is available.
Russell King0425a142006-02-16 16:48:31 +0000749 */
Russell Kingc4d877c2011-01-27 09:50:13 +0000750 if (host->size < variant->fifohalfsize)
Russell King0425a142006-02-16 16:48:31 +0000751 irqmask |= MCI_RXDATAAVLBLMASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700752 } else {
753 /*
754 * We don't actually need to include "FIFO empty" here
755 * since its implicit in "FIFO half empty".
756 */
757 irqmask = MCI_TXFIFOHALFEMPTYMASK;
758 }
759
760 writel(datactrl, base + MMCIDATACTRL);
761 writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
Linus Walleij2686b4b2010-10-19 12:39:48 +0100762 mmci_set_mask1(host, irqmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700763}
764
765static void
766mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
767{
768 void __iomem *base = host->base;
769
Linus Walleij64de0282010-02-19 01:09:10 +0100770 dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -0700771 cmd->opcode, cmd->arg, cmd->flags);
772
773 if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
774 writel(0, base + MMCICOMMAND);
775 udelay(1);
776 }
777
778 c |= cmd->opcode | MCI_CPSM_ENABLE;
Russell Kinge9225172006-02-02 12:23:12 +0000779 if (cmd->flags & MMC_RSP_PRESENT) {
780 if (cmd->flags & MMC_RSP_136)
781 c |= MCI_CPSM_LONGRSP;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700782 c |= MCI_CPSM_RESPONSE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700783 }
784 if (/*interrupt*/0)
785 c |= MCI_CPSM_INTERRUPT;
786
787 host->cmd = cmd;
788
789 writel(cmd->arg, base + MMCIARGUMENT);
790 writel(c, base + MMCICOMMAND);
791}
792
/*
 * Handle the data-path portion of an MMCI interrupt: record errors,
 * account transferred bytes, and complete or continue the request.
 * Called from mmci_irq() with host->lock held.
 *
 * @host:   controller instance
 * @data:   the in-flight data transfer (host->data)
 * @status: MMCISTATUS bits, already masked against MMCIMASK0
 */
static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host)) {
			mmci_dma_data_error(host);
			mmci_dma_unmap(host, data);
		}

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			/*
			 * On an overrun, up to a FIFO-worth of the counted
			 * bytes never reached memory; discount them.
			 */
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		/* Only whole blocks count as transferred. */
		data->bytes_xfered = round_down(success, data->blksz);
	}

	/* Block-end interrupts are not acted upon here; DATAEND is used. */
	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_finalize(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		/*
		 * When CMD23 (mrq->sbc) was used, no explicit stop command
		 * is sent; otherwise issue data->stop now.
		 */
		if (!data->stop || host->mrq->sbc) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}
859
/*
 * Handle the command-path portion of an MMCI interrupt: capture the
 * response (or error) for the active command and decide what runs next.
 * Called from mmci_irq() with host->lock held.
 */
static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;
	/* Was this the CMD23 "set block count" precursor command? */
	bool sbc = (cmd == host->mrq->sbc);

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		/* CRC failures only matter for commands that expect a CRC. */
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if ((!sbc && !cmd->data) || cmd->error) {
		/* No data phase follows, or the request must be aborted. */
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host)) {
				mmci_dma_data_error(host);
				mmci_dma_unmap(host, host->data);
			}
			mmci_stop_data(host);
		}
		mmci_request_end(host, host->mrq);
	} else if (sbc) {
		/* CMD23 completed: now send the actual data command. */
		mmci_start_command(host, host->mrq->cmd, 0);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		/*
		 * Writes start the data path only after the command; reads
		 * had it armed before the command (see mmci_request()).
		 */
		mmci_start_data(host, cmd->data);
	}
}
896
/*
 * Drain up to @remain bytes from the receive FIFO into @buffer.
 *
 * host->size is the total byte count still expected from the card and
 * MMCIFIFOCNT reports the words yet to arrive, so their difference gives
 * the bytes currently sitting in the FIFO.
 *
 * Returns the number of bytes actually copied.
 */
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		/* Bytes available in the FIFO now (FIFOCNT is in words). */
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				/*
				 * Tail fragment: pull one full word and
				 * keep only the bytes that belong to the
				 * transfer.
				 */
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				/* Leftover odd bytes are read next pass. */
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}
944
945static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
946{
Rabin Vincent8301bb62010-08-09 12:57:30 +0100947 struct variant_data *variant = host->variant;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700948 void __iomem *base = host->base;
949 char *ptr = buffer;
950
951 do {
952 unsigned int count, maxcnt;
953
Rabin Vincent8301bb62010-08-09 12:57:30 +0100954 maxcnt = status & MCI_TXFIFOEMPTY ?
955 variant->fifosize : variant->fifohalfsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700956 count = min(remain, maxcnt);
957
Linus Walleij34177802010-10-19 12:43:58 +0100958 /*
Linus Walleij34177802010-10-19 12:43:58 +0100959 * SDIO especially may want to send something that is
960 * not divisible by 4 (as opposed to card sectors
961 * etc), and the FIFO only accept full 32-bit writes.
962 * So compensate by adding +3 on the count, a single
963 * byte become a 32bit write, 7 bytes will be two
964 * 32bit writes etc.
965 */
Davide Ciminaghi4b85da02012-12-10 14:47:21 +0100966 iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700967
968 ptr += count;
969 remain -= count;
970
971 if (remain == 0)
972 break;
973
974 status = readl(base + MMCISTATUS);
975 } while (status & MCI_TXFIFOHALFEMPTY);
976
977 return ptr - buffer;
978}
979
/*
 * PIO data transfer IRQ handler.
 *
 * Walks the scatterlist via the sg_mapping_iter, moving data between the
 * FIFO and memory until either the FIFO stalls or the current segment is
 * exhausted, then reprograms the PIO interrupt masks accordingly.
 * NOTE(review): local IRQs are disabled around the iteration, presumably
 * because sg_miter_next() may take an atomic kmap — confirm.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		/* Tell the iterator how much of this segment was used. */
		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		/* Segment not finished: the FIFO stalled, wait for IRQ. */
		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}
1059
/*
 * Handle completion of command and data transfers.
 *
 * Main interrupt handler: loops, re-reading MMCISTATUS, until no enabled
 * status bits remain, dispatching to the data and command sub-handlers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			/*
			 * Single IRQ line: service any pending PIO (mask1)
			 * events from here, then strip them from status so
			 * they are not misread as cmd/data events below.
			 */
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		/* Act only on - and acknowledge only - enabled interrupts. */
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
1106
1107static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1108{
1109 struct mmci_host *host = mmc_priv(mmc);
Linus Walleij9e943022008-10-24 21:17:50 +01001110 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001111
1112 WARN_ON(host->mrq != NULL);
1113
Ulf Hansson653a7612013-01-21 21:29:34 +01001114 mrq->cmd->error = mmci_validate_data(host, mrq->data);
1115 if (mrq->cmd->error) {
Pierre Ossman255d01a2007-07-24 20:38:53 +02001116 mmc_request_done(mmc, mrq);
1117 return;
1118 }
1119
Russell King1c3be362011-08-14 09:17:05 +01001120 pm_runtime_get_sync(mmc_dev(mmc));
1121
Linus Walleij9e943022008-10-24 21:17:50 +01001122 spin_lock_irqsave(&host->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001123
1124 host->mrq = mrq;
1125
Per Forlin58c7ccb2011-07-01 18:55:24 +02001126 if (mrq->data)
1127 mmci_get_next_data(host, mrq->data);
1128
Linus Torvalds1da177e2005-04-16 15:20:36 -07001129 if (mrq->data && mrq->data->flags & MMC_DATA_READ)
1130 mmci_start_data(host, mrq->data);
1131
Ulf Hansson024629c2013-05-13 15:40:56 +01001132 if (mrq->sbc)
1133 mmci_start_command(host, mrq->sbc, 0);
1134 else
1135 mmci_start_command(host, mrq->cmd, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001136
Linus Walleij9e943022008-10-24 21:17:50 +01001137 spin_unlock_irqrestore(&host->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138}
1139
/*
 * MMC core callback for bus/power configuration changes.
 *
 * Applies the requested power state (driving the vmmc/vqmmc regulators
 * where present), builds the MMCIPOWER value including variant-specific
 * signal-direction and open-drain bits, then writes the clock and power
 * registers under the host lock.
 */
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	pm_runtime_get_sync(mmc_dev(mmc));

	/* Give the platform a chance to veto/adjust; failure is only logged. */
	if (host->plat->ios_handler &&
		host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		/* Only disable vqmmc if this driver enabled it earlier. */
		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

		/*
		 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
		 * and instead uses MCI_PWR_ON so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;

		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->plat->sigdir;

		/* Narrow buses must not drive the unused data lines. */
		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant use the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	/*
	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
	 * gating the clock, the MCI_PWR_ON bit is cleared.
	 */
	if (!ios->clock && variant->pwrreg_clkgate)
		pwr &= ~MCI_PWR_ON;

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);

	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}
1236
Russell King89001442009-07-09 15:16:07 +01001237static int mmci_get_ro(struct mmc_host *mmc)
1238{
1239 struct mmci_host *host = mmc_priv(mmc);
1240
1241 if (host->gpio_wp == -ENOSYS)
1242 return -ENOSYS;
1243
Linus Walleij18a063012010-09-12 12:56:44 +01001244 return gpio_get_value_cansleep(host->gpio_wp);
Russell King89001442009-07-09 15:16:07 +01001245}
1246
1247static int mmci_get_cd(struct mmc_host *mmc)
1248{
1249 struct mmci_host *host = mmc_priv(mmc);
Rabin Vincent29719442010-08-09 12:54:43 +01001250 struct mmci_platform_data *plat = host->plat;
Russell King89001442009-07-09 15:16:07 +01001251 unsigned int status;
1252
Rabin Vincent4b8caec2010-08-09 12:56:40 +01001253 if (host->gpio_cd == -ENOSYS) {
1254 if (!plat->status)
1255 return 1; /* Assume always present */
1256
Rabin Vincent29719442010-08-09 12:54:43 +01001257 status = plat->status(mmc_dev(host->mmc));
Rabin Vincent4b8caec2010-08-09 12:56:40 +01001258 } else
Linus Walleij18a063012010-09-12 12:56:44 +01001259 status = !!gpio_get_value_cansleep(host->gpio_cd)
1260 ^ plat->cd_invert;
Russell King89001442009-07-09 15:16:07 +01001261
Russell King74bc8092010-07-29 15:58:59 +01001262 /*
1263 * Use positive logic throughout - status is zero for no card,
1264 * non-zero for card inserted.
1265 */
1266 return status;
Russell King89001442009-07-09 15:16:07 +01001267}
1268
Rabin Vincent148b8b32010-08-09 12:55:48 +01001269static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
1270{
1271 struct mmci_host *host = dev_id;
1272
1273 mmc_detect_change(host->mmc, msecs_to_jiffies(500));
1274
1275 return IRQ_HANDLED;
1276}
1277
/* Host controller operations handed to the MMC core for this driver. */
static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};
1286
Lee Jones000bc9d2012-04-16 10:18:43 +01001287#ifdef CONFIG_OF
1288static void mmci_dt_populate_generic_pdata(struct device_node *np,
1289 struct mmci_platform_data *pdata)
1290{
1291 int bus_width = 0;
1292
Lee Jones9a597012012-04-12 16:51:13 +01001293 pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
Lee Jones9a597012012-04-12 16:51:13 +01001294 pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);
Lee Jones000bc9d2012-04-16 10:18:43 +01001295
1296 if (of_get_property(np, "cd-inverted", NULL))
1297 pdata->cd_invert = true;
1298 else
1299 pdata->cd_invert = false;
1300
1301 of_property_read_u32(np, "max-frequency", &pdata->f_max);
1302 if (!pdata->f_max)
1303 pr_warn("%s has no 'max-frequency' property\n", np->full_name);
1304
1305 if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
1306 pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
1307 if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
1308 pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;
1309
1310 of_property_read_u32(np, "bus-width", &bus_width);
1311 switch (bus_width) {
1312 case 0 :
1313 /* No bus-width supplied. */
1314 break;
1315 case 4 :
1316 pdata->capabilities |= MMC_CAP_4_BIT_DATA;
1317 break;
1318 case 8 :
1319 pdata->capabilities |= MMC_CAP_8_BIT_DATA;
1320 break;
1321 default :
1322 pr_warn("%s: Unsupported bus width\n", np->full_name);
1323 }
1324}
Lee Jonesc0a120a2012-05-08 13:59:38 +01001325#else
/* Stub for non-DT builds: platform data must come from board files. */
static void mmci_dt_populate_generic_pdata(struct device_node *np,
					struct mmci_platform_data *pdata)
{
}
Lee Jones000bc9d2012-04-16 10:18:43 +01001331#endif
1332
Bill Pembertonc3be1ef2012-11-19 13:23:06 -05001333static int mmci_probe(struct amba_device *dev,
Russell Kingaa25afa2011-02-19 15:55:00 +00001334 const struct amba_id *id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335{
Linus Walleij6ef297f2009-09-22 14:29:36 +01001336 struct mmci_platform_data *plat = dev->dev.platform_data;
Lee Jones000bc9d2012-04-16 10:18:43 +01001337 struct device_node *np = dev->dev.of_node;
Rabin Vincent4956e102010-07-21 12:54:40 +01001338 struct variant_data *variant = id->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339 struct mmci_host *host;
1340 struct mmc_host *mmc;
1341 int ret;
1342
Lee Jones000bc9d2012-04-16 10:18:43 +01001343 /* Must have platform data or Device Tree. */
1344 if (!plat && !np) {
1345 dev_err(&dev->dev, "No plat data or DT found\n");
1346 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347 }
1348
Lee Jonesb9b52912012-06-12 10:49:51 +01001349 if (!plat) {
1350 plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
1351 if (!plat)
1352 return -ENOMEM;
1353 }
1354
Lee Jones000bc9d2012-04-16 10:18:43 +01001355 if (np)
1356 mmci_dt_populate_generic_pdata(np, plat);
1357
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358 ret = amba_request_regions(dev, DRIVER_NAME);
1359 if (ret)
1360 goto out;
1361
1362 mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
1363 if (!mmc) {
1364 ret = -ENOMEM;
1365 goto rel_regions;
1366 }
1367
1368 host = mmc_priv(mmc);
Rabin Vincent4ea580f2009-04-17 08:44:19 +05301369 host->mmc = mmc;
Russell King012b7d32009-07-09 15:13:56 +01001370
Russell King89001442009-07-09 15:16:07 +01001371 host->gpio_wp = -ENOSYS;
1372 host->gpio_cd = -ENOSYS;
Rabin Vincent148b8b32010-08-09 12:55:48 +01001373 host->gpio_cd_irq = -1;
Russell King89001442009-07-09 15:16:07 +01001374
Russell King012b7d32009-07-09 15:13:56 +01001375 host->hw_designer = amba_manf(dev);
1376 host->hw_revision = amba_rev(dev);
Linus Walleij64de0282010-02-19 01:09:10 +01001377 dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
1378 dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);
Russell King012b7d32009-07-09 15:13:56 +01001379
Ulf Hansson665ba562013-05-13 15:39:17 +01001380 host->clk = devm_clk_get(&dev->dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 if (IS_ERR(host->clk)) {
1382 ret = PTR_ERR(host->clk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 goto host_free;
1384 }
1385
Julia Lawallac940932012-08-26 16:00:59 +00001386 ret = clk_prepare_enable(host->clk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387 if (ret)
Ulf Hansson665ba562013-05-13 15:39:17 +01001388 goto host_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389
1390 host->plat = plat;
Rabin Vincent4956e102010-07-21 12:54:40 +01001391 host->variant = variant;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 host->mclk = clk_get_rate(host->clk);
Linus Walleijc8df9a52008-04-29 09:34:07 +01001393 /*
1394 * According to the spec, mclk is max 100 MHz,
1395 * so we try to adjust the clock down to this,
1396 * (if possible).
1397 */
1398 if (host->mclk > 100000000) {
1399 ret = clk_set_rate(host->clk, 100000000);
1400 if (ret < 0)
1401 goto clk_disable;
1402 host->mclk = clk_get_rate(host->clk);
Linus Walleij64de0282010-02-19 01:09:10 +01001403 dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
1404 host->mclk);
Linus Walleijc8df9a52008-04-29 09:34:07 +01001405 }
Russell Kingc8ebae32011-01-11 19:35:53 +00001406 host->phybase = dev->res.start;
Linus Walleijdc890c22009-06-07 23:27:31 +01001407 host->base = ioremap(dev->res.start, resource_size(&dev->res));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408 if (!host->base) {
1409 ret = -ENOMEM;
1410 goto clk_disable;
1411 }
1412
1413 mmc->ops = &mmci_ops;
Linus Walleij7f294e42011-07-08 09:57:15 +01001414 /*
1415 * The ARM and ST versions of the block have slightly different
1416 * clock divider equations which means that the minimum divider
1417 * differs too.
1418 */
1419 if (variant->st_clkdiv)
1420 mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
1421 else
1422 mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
Linus Walleij808d97c2010-04-08 07:39:38 +01001423 /*
1424 * If the platform data supplies a maximum operating
1425 * frequency, this takes precedence. Else, we fall back
1426 * to using the module parameter, which has a (low)
1427 * default value in case it is not specified. Either
1428 * value must not exceed the clock rate into the block,
1429 * of course.
1430 */
1431 if (plat->f_max)
1432 mmc->f_max = min(host->mclk, plat->f_max);
1433 else
1434 mmc->f_max = min(host->mclk, fmax);
Linus Walleij64de0282010-02-19 01:09:10 +01001435 dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
1436
Linus Walleija9a83782012-10-29 14:39:30 +01001437 host->pinctrl = devm_pinctrl_get(&dev->dev);
1438 if (IS_ERR(host->pinctrl)) {
1439 ret = PTR_ERR(host->pinctrl);
1440 goto clk_disable;
1441 }
1442
1443 host->pins_default = pinctrl_lookup_state(host->pinctrl,
1444 PINCTRL_STATE_DEFAULT);
1445
1446 /* enable pins to be muxed in and configured */
1447 if (!IS_ERR(host->pins_default)) {
1448 ret = pinctrl_select_state(host->pinctrl, host->pins_default);
1449 if (ret)
1450 dev_warn(&dev->dev, "could not set default pins\n");
1451 } else
1452 dev_warn(&dev->dev, "could not get default pinstate\n");
1453
Ulf Hansson599c1d52013-01-07 16:22:50 +01001454 /* Get regulators and the supported OCR mask */
1455 mmc_regulator_get_supply(mmc);
1456 if (!mmc->ocr_avail)
Linus Walleij34e84f32009-09-22 14:41:40 +01001457 mmc->ocr_avail = plat->ocr_mask;
Ulf Hansson599c1d52013-01-07 16:22:50 +01001458 else if (plat->ocr_mask)
1459 dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
1460
Linus Walleij9e6c82c2009-09-14 12:57:11 +01001461 mmc->caps = plat->capabilities;
Per Forlin5a092622011-11-14 12:02:28 +01001462 mmc->caps2 = plat->capabilities2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463
Ulf Hansson70be2082013-01-07 15:35:06 +01001464 /* We support these PM capabilities. */
1465 mmc->pm_caps = MMC_PM_KEEP_POWER;
1466
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467 /*
1468 * We can do SGIO
1469 */
Martin K. Petersena36274e2010-09-10 01:33:59 -04001470 mmc->max_segs = NR_SG;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471
1472 /*
Rabin Vincent08458ef2010-07-21 12:55:59 +01001473 * Since only a certain number of bits are valid in the data length
1474 * register, we must ensure that we don't exceed 2^num-1 bytes in a
1475 * single request.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 */
Rabin Vincent08458ef2010-07-21 12:55:59 +01001477 mmc->max_req_size = (1 << variant->datalength_bits) - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478
1479 /*
1480 * Set the maximum segment size. Since we aren't doing DMA
1481 * (yet) we are only limited by the data length register.
1482 */
Pierre Ossman55db8902006-11-21 17:55:45 +01001483 mmc->max_seg_size = mmc->max_req_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484
Pierre Ossmanfe4a3c72006-11-21 17:54:23 +01001485 /*
1486 * Block size can be up to 2048 bytes, but must be a power of two.
1487 */
Will Deacon8f7f6b72012-02-24 11:25:21 +00001488 mmc->max_blk_size = 1 << 11;
Pierre Ossmanfe4a3c72006-11-21 17:54:23 +01001489
Pierre Ossman55db8902006-11-21 17:55:45 +01001490 /*
Will Deacon8f7f6b72012-02-24 11:25:21 +00001491 * Limit the number of blocks transferred so that we don't overflow
1492 * the maximum request size.
Pierre Ossman55db8902006-11-21 17:55:45 +01001493 */
Will Deacon8f7f6b72012-02-24 11:25:21 +00001494 mmc->max_blk_count = mmc->max_req_size >> 11;
Pierre Ossman55db8902006-11-21 17:55:45 +01001495
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 spin_lock_init(&host->lock);
1497
1498 writel(0, host->base + MMCIMASK0);
1499 writel(0, host->base + MMCIMASK1);
1500 writel(0xfff, host->base + MMCICLEAR);
1501
Roland Stigge2805b9a2012-06-17 21:14:27 +01001502 if (plat->gpio_cd == -EPROBE_DEFER) {
1503 ret = -EPROBE_DEFER;
1504 goto err_gpio_cd;
1505 }
Russell King89001442009-07-09 15:16:07 +01001506 if (gpio_is_valid(plat->gpio_cd)) {
1507 ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
1508 if (ret == 0)
1509 ret = gpio_direction_input(plat->gpio_cd);
1510 if (ret == 0)
1511 host->gpio_cd = plat->gpio_cd;
1512 else if (ret != -ENOSYS)
1513 goto err_gpio_cd;
Rabin Vincent148b8b32010-08-09 12:55:48 +01001514
Linus Walleij17ee0832011-05-05 17:23:10 +01001515 /*
1516 * A gpio pin that will detect cards when inserted and removed
1517 * will most likely want to trigger on the edges if it is
1518 * 0 when ejected and 1 when inserted (or mutatis mutandis
1519 * for the inverted case) so we request triggers on both
1520 * edges.
1521 */
Rabin Vincent148b8b32010-08-09 12:55:48 +01001522 ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
Linus Walleij17ee0832011-05-05 17:23:10 +01001523 mmci_cd_irq,
1524 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
1525 DRIVER_NAME " (cd)", host);
Rabin Vincent148b8b32010-08-09 12:55:48 +01001526 if (ret >= 0)
1527 host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
Russell King89001442009-07-09 15:16:07 +01001528 }
Roland Stigge2805b9a2012-06-17 21:14:27 +01001529 if (plat->gpio_wp == -EPROBE_DEFER) {
1530 ret = -EPROBE_DEFER;
1531 goto err_gpio_wp;
1532 }
Russell King89001442009-07-09 15:16:07 +01001533 if (gpio_is_valid(plat->gpio_wp)) {
1534 ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
1535 if (ret == 0)
1536 ret = gpio_direction_input(plat->gpio_wp);
1537 if (ret == 0)
1538 host->gpio_wp = plat->gpio_wp;
1539 else if (ret != -ENOSYS)
1540 goto err_gpio_wp;
1541 }
1542
Rabin Vincent4b8caec2010-08-09 12:56:40 +01001543 if ((host->plat->status || host->gpio_cd != -ENOSYS)
1544 && host->gpio_cd_irq < 0)
Rabin Vincent148b8b32010-08-09 12:55:48 +01001545 mmc->caps |= MMC_CAP_NEEDS_POLL;
1546
Thomas Gleixnerdace1452006-07-01 19:29:38 -07001547 ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548 if (ret)
1549 goto unmap;
1550
Russell Kingdfb85182012-05-03 11:33:15 +01001551 if (!dev->irq[1])
Linus Walleij2686b4b2010-10-19 12:39:48 +01001552 host->singleirq = true;
1553 else {
1554 ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
1555 DRIVER_NAME " (pio)", host);
1556 if (ret)
1557 goto irq0_free;
1558 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559
Linus Walleij8cb28152011-01-24 15:22:13 +01001560 writel(MCI_IRQENABLE, host->base + MMCIMASK0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561
1562 amba_set_drvdata(dev, mmc);
1563
Russell Kingc8ebae32011-01-11 19:35:53 +00001564 dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
1565 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
1566 amba_rev(dev), (unsigned long long)dev->res.start,
1567 dev->irq[0], dev->irq[1]);
1568
1569 mmci_dma_setup(host);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570
Ulf Hansson2cd976c2011-12-13 17:01:11 +01001571 pm_runtime_set_autosuspend_delay(&dev->dev, 50);
1572 pm_runtime_use_autosuspend(&dev->dev);
Russell King1c3be362011-08-14 09:17:05 +01001573 pm_runtime_put(&dev->dev);
1574
Russell King8c11a942010-12-28 19:40:40 +00001575 mmc_add_host(mmc);
1576
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577 return 0;
1578
1579 irq0_free:
1580 free_irq(dev->irq[0], host);
1581 unmap:
Russell King89001442009-07-09 15:16:07 +01001582 if (host->gpio_wp != -ENOSYS)
1583 gpio_free(host->gpio_wp);
1584 err_gpio_wp:
Rabin Vincent148b8b32010-08-09 12:55:48 +01001585 if (host->gpio_cd_irq >= 0)
1586 free_irq(host->gpio_cd_irq, host);
Russell King89001442009-07-09 15:16:07 +01001587 if (host->gpio_cd != -ENOSYS)
1588 gpio_free(host->gpio_cd);
1589 err_gpio_cd:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590 iounmap(host->base);
1591 clk_disable:
Julia Lawallac940932012-08-26 16:00:59 +00001592 clk_disable_unprepare(host->clk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593 host_free:
1594 mmc_free_host(mmc);
1595 rel_regions:
1596 amba_release_regions(dev);
1597 out:
1598 return ret;
1599}
1600
/*
 * mmci_remove - unbind the driver from an MMCI primecell.
 *
 * Tears down everything mmci_probe() set up, in reverse order:
 * MMC host unregistration, interrupt masking, DMA, IRQs, card-detect /
 * write-protect GPIOs, MMIO mapping, clock and bus regions.
 * Always returns 0 (amba_driver remove cannot usefully fail).
 */
static int mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe.  We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		/* Detach from the MMC core before touching the hardware. */
		mmc_remove_host(mmc);

		/* Mask both interrupt sources... */
		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		/* ...and abort any command/data activity on the cell. */
		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		/* Release DMA channels before freeing the IRQs they signal. */
		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		/*
		 * -ENOSYS in gpio_wp/gpio_cd marks "never requested";
		 * gpio_cd_irq < 0 likewise marks "no CD IRQ requested".
		 */
		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable_unprepare(host->clk);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}
1646
Ulf Hansson48fa7002011-12-13 16:59:34 +01001647#ifdef CONFIG_SUSPEND
1648static int mmci_suspend(struct device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649{
Ulf Hansson48fa7002011-12-13 16:59:34 +01001650 struct amba_device *adev = to_amba_device(dev);
1651 struct mmc_host *mmc = amba_get_drvdata(adev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 int ret = 0;
1653
1654 if (mmc) {
1655 struct mmci_host *host = mmc_priv(mmc);
1656
Matt Fleming1a13f8f2010-05-26 14:42:08 -07001657 ret = mmc_suspend_host(mmc);
Ulf Hansson2cd976c2011-12-13 17:01:11 +01001658 if (ret == 0) {
1659 pm_runtime_get_sync(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 writel(0, host->base + MMCIMASK0);
Ulf Hansson2cd976c2011-12-13 17:01:11 +01001661 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 }
1663
1664 return ret;
1665}
1666
Ulf Hansson48fa7002011-12-13 16:59:34 +01001667static int mmci_resume(struct device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668{
Ulf Hansson48fa7002011-12-13 16:59:34 +01001669 struct amba_device *adev = to_amba_device(dev);
1670 struct mmc_host *mmc = amba_get_drvdata(adev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 int ret = 0;
1672
1673 if (mmc) {
1674 struct mmci_host *host = mmc_priv(mmc);
1675
1676 writel(MCI_IRQENABLE, host->base + MMCIMASK0);
Ulf Hansson2cd976c2011-12-13 17:01:11 +01001677 pm_runtime_put(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678
1679 ret = mmc_resume_host(mmc);
1680 }
1681
1682 return ret;
1683}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684#endif
1685
Ulf Hansson82592932013-01-09 11:15:26 +01001686#ifdef CONFIG_PM_RUNTIME
1687static int mmci_runtime_suspend(struct device *dev)
1688{
1689 struct amba_device *adev = to_amba_device(dev);
1690 struct mmc_host *mmc = amba_get_drvdata(adev);
1691
1692 if (mmc) {
1693 struct mmci_host *host = mmc_priv(mmc);
1694 clk_disable_unprepare(host->clk);
1695 }
1696
1697 return 0;
1698}
1699
1700static int mmci_runtime_resume(struct device *dev)
1701{
1702 struct amba_device *adev = to_amba_device(dev);
1703 struct mmc_host *mmc = amba_get_drvdata(adev);
1704
1705 if (mmc) {
1706 struct mmci_host *host = mmc_priv(mmc);
1707 clk_prepare_enable(host->clk);
1708 }
1709
1710 return 0;
1711}
1712#endif
1713
/*
 * PM operations: system-sleep hooks are compiled in under CONFIG_SUSPEND
 * and runtime hooks under CONFIG_PM_RUNTIME; the SET_* macros expand to
 * nothing when the corresponding config option is disabled.
 */
static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};
1718
/*
 * AMBA ID table: matches the primecell peripheral ID against a per-IP
 * variant descriptor (.data) describing that cell's quirks/capabilities.
 */
static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		/* Revision 1: larger FIFO */
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		/* Revision 2: larger FIFO with hardware flow control */
		.id	= 0x02041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo_hwfc,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id     = 0x00180180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x10180180,
		.mask   = 0xf0ffffff,
		.data	= &variant_nomadik,
	},
	{
		.id     = 0x00280180,
		.mask   = 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id     = 0x00480180,
		.mask   = 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id     = 0x10480180,
		.mask   = 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },	/* sentinel: terminates the match table */
};

MODULE_DEVICE_TABLE(amba, mmci_ids);
1770
/*
 * AMBA bus driver glue: binds mmci_probe/mmci_remove to any primecell
 * whose peripheral ID matches mmci_ids, with PM handled by mmci_dev_pm_ops.
 */
static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= mmci_remove,
	.id_table	= mmci_ids,
};
1780
/* Registers mmci_driver at module init and unregisters it at exit. */
module_amba_driver(mmci_driver);

/* fmax (defined earlier in this file) is exposed read-only in sysfs. */
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");