|  | /* | 
|  | *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver | 
|  | * | 
|  | *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. | 
|  | * | 
|  | * This program is free software; you can redistribute it and/or modify | 
|  | * it under the terms of the GNU General Public License as published by | 
|  | * the Free Software Foundation; either version 2 of the License, or (at | 
|  | * your option) any later version. | 
|  | * | 
|  | * Thanks to the following companies for their support: | 
|  | * | 
|  | *     - JMicron (hardware and technical support) | 
|  | */ | 
|  |  | 
|  | #include <linux/delay.h> | 
|  | #include <linux/highmem.h> | 
|  | #include <linux/io.h> | 
|  | #include <linux/dma-mapping.h> | 
|  | #include <linux/scatterlist.h> | 
|  |  | 
|  | #include <linux/leds.h> | 
|  |  | 
|  | #include <linux/mmc/host.h> | 
|  |  | 
|  | #include "sdhci.h" | 
|  |  | 
|  | #define DRIVER_NAME "sdhci" | 
|  |  | 
|  | #define DBG(f, x...) \ | 
|  | pr_debug(DRIVER_NAME " [%s()]: " f, __func__, ## x) | 
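|  | /* | 
|  | * A hypothetical call such as DBG("clock %u\n", clock) expands to | 
|  | * pr_debug("sdhci [%s()]: clock %u\n", __func__, clock), so every | 
|  | * debug line carries the name of the emitting function. | 
|  | */ | 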
|  |  | 
|  | #if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \ | 
|  | defined(CONFIG_MMC_SDHCI_MODULE)) | 
|  | #define SDHCI_USE_LEDS_CLASS | 
|  | #endif | 
|  |  | 
|  | static unsigned int debug_quirks = 0; | 
|  |  | 
|  | static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *); | 
|  | static void sdhci_finish_data(struct sdhci_host *); | 
|  |  | 
|  | static void sdhci_send_command(struct sdhci_host *, struct mmc_command *); | 
|  | static void sdhci_finish_command(struct sdhci_host *); | 
|  |  | 
|  | static void sdhci_dumpregs(struct sdhci_host *host) | 
|  | { | 
|  | printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n"); | 
|  |  | 
|  | printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n", | 
|  | sdhci_readl(host, SDHCI_DMA_ADDRESS), | 
|  | sdhci_readw(host, SDHCI_HOST_VERSION)); | 
|  | printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n", | 
|  | sdhci_readw(host, SDHCI_BLOCK_SIZE), | 
|  | sdhci_readw(host, SDHCI_BLOCK_COUNT)); | 
|  | printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n", | 
|  | sdhci_readl(host, SDHCI_ARGUMENT), | 
|  | sdhci_readw(host, SDHCI_TRANSFER_MODE)); | 
|  | printk(KERN_DEBUG DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n", | 
|  | sdhci_readl(host, SDHCI_PRESENT_STATE), | 
|  | sdhci_readb(host, SDHCI_HOST_CONTROL)); | 
|  | printk(KERN_DEBUG DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n", | 
|  | sdhci_readb(host, SDHCI_POWER_CONTROL), | 
|  | sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); | 
|  | printk(KERN_DEBUG DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n", | 
|  | sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), | 
|  | sdhci_readw(host, SDHCI_CLOCK_CONTROL)); | 
|  | printk(KERN_DEBUG DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n", | 
|  | sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), | 
|  | sdhci_readl(host, SDHCI_INT_STATUS)); | 
|  | printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n", | 
|  | sdhci_readl(host, SDHCI_INT_ENABLE), | 
|  | sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); | 
|  | printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n", | 
|  | sdhci_readw(host, SDHCI_ACMD12_ERR), | 
|  | sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); | 
|  | printk(KERN_DEBUG DRIVER_NAME ": Caps:     0x%08x | Max curr: 0x%08x\n", | 
|  | sdhci_readl(host, SDHCI_CAPABILITIES), | 
|  | sdhci_readl(host, SDHCI_MAX_CURRENT)); | 
|  |  | 
|  | if (host->flags & SDHCI_USE_ADMA) | 
|  | printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", | 
|  | readl(host->ioaddr + SDHCI_ADMA_ERROR), | 
|  | readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); | 
|  |  | 
|  | printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n"); | 
|  | } | 
|  |  | 
|  | /*****************************************************************************\ | 
|  | *                                                                           * | 
|  | * Low level functions                                                       * | 
|  | *                                                                           * | 
|  | \*****************************************************************************/ | 
|  |  | 
|  | static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set) | 
|  | { | 
|  | u32 ier; | 
|  |  | 
|  | ier = sdhci_readl(host, SDHCI_INT_ENABLE); | 
|  | ier &= ~clear; | 
|  | ier |= set; | 
|  | sdhci_writel(host, ier, SDHCI_INT_ENABLE); | 
|  | sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE); | 
|  | } | 
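|  | /* | 
|  | * The same mask is written to both registers above: INT_ENABLE | 
|  | * selects which events get latched into the status register, while | 
|  | * SIGNAL_ENABLE selects which of those assert the IRQ line. This | 
|  | * driver keeps the two in sync, so a masked event is neither | 
|  | * recorded nor signalled. | 
|  | */ | 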
|  |  | 
|  | static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs) | 
|  | { | 
|  | sdhci_clear_set_irqs(host, 0, irqs); | 
|  | } | 
|  |  | 
|  | static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs) | 
|  | { | 
|  | sdhci_clear_set_irqs(host, irqs, 0); | 
|  | } | 
|  |  | 
|  | static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) | 
|  | { | 
|  | u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT; | 
|  |  | 
|  | if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) | 
|  | return; | 
|  |  | 
|  | if (enable) | 
|  | sdhci_unmask_irqs(host, irqs); | 
|  | else | 
|  | sdhci_mask_irqs(host, irqs); | 
|  | } | 
|  |  | 
|  | static void sdhci_enable_card_detection(struct sdhci_host *host) | 
|  | { | 
|  | sdhci_set_card_detection(host, true); | 
|  | } | 
|  |  | 
|  | static void sdhci_disable_card_detection(struct sdhci_host *host) | 
|  | { | 
|  | sdhci_set_card_detection(host, false); | 
|  | } | 
|  |  | 
|  | static void sdhci_reset(struct sdhci_host *host, u8 mask) | 
|  | { | 
|  | unsigned long timeout; | 
|  | u32 uninitialized_var(ier); | 
|  |  | 
|  | if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { | 
|  | if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & | 
|  | SDHCI_CARD_PRESENT)) | 
|  | return; | 
|  | } | 
|  |  | 
|  | if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) | 
|  | ier = sdhci_readl(host, SDHCI_INT_ENABLE); | 
|  |  | 
|  | sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); | 
|  |  | 
|  | if (mask & SDHCI_RESET_ALL) | 
|  | host->clock = 0; | 
|  |  | 
|  | /* Wait max 100 ms */ | 
|  | timeout = 100; | 
|  |  | 
|  | /* hw clears the bit when it's done */ | 
|  | while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) { | 
|  | if (timeout == 0) { | 
|  | printk(KERN_ERR "%s: Reset 0x%x never completed.\n", | 
|  | mmc_hostname(host->mmc), (int)mask); | 
|  | sdhci_dumpregs(host); | 
|  | return; | 
|  | } | 
|  | timeout--; | 
|  | mdelay(1); | 
|  | } | 
|  |  | 
|  | if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) | 
|  | sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier); | 
|  | } | 
|  |  | 
|  | static void sdhci_init(struct sdhci_host *host) | 
|  | { | 
|  | sdhci_reset(host, SDHCI_RESET_ALL); | 
|  |  | 
|  | sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, | 
|  | SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | | 
|  | SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX | | 
|  | SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | | 
|  | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE); | 
|  | } | 
|  |  | 
|  | static void sdhci_reinit(struct sdhci_host *host) | 
|  | { | 
|  | sdhci_init(host); | 
|  | sdhci_enable_card_detection(host); | 
|  | } | 
|  |  | 
|  | static void sdhci_activate_led(struct sdhci_host *host) | 
|  | { | 
|  | u8 ctrl; | 
|  |  | 
|  | ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); | 
|  | ctrl |= SDHCI_CTRL_LED; | 
|  | sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); | 
|  | } | 
|  |  | 
|  | static void sdhci_deactivate_led(struct sdhci_host *host) | 
|  | { | 
|  | u8 ctrl; | 
|  |  | 
|  | ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); | 
|  | ctrl &= ~SDHCI_CTRL_LED; | 
|  | sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); | 
|  | } | 
|  |  | 
|  | #ifdef SDHCI_USE_LEDS_CLASS | 
|  | static void sdhci_led_control(struct led_classdev *led, | 
|  | enum led_brightness brightness) | 
|  | { | 
|  | struct sdhci_host *host = container_of(led, struct sdhci_host, led); | 
|  | unsigned long flags; | 
|  |  | 
|  | spin_lock_irqsave(&host->lock, flags); | 
|  |  | 
|  | if (brightness == LED_OFF) | 
|  | sdhci_deactivate_led(host); | 
|  | else | 
|  | sdhci_activate_led(host); | 
|  |  | 
|  | spin_unlock_irqrestore(&host->lock, flags); | 
|  | } | 
|  | #endif | 
|  |  | 
|  | /*****************************************************************************\ | 
|  | *                                                                           * | 
|  | * Core functions                                                            * | 
|  | *                                                                           * | 
|  | \*****************************************************************************/ | 
|  |  | 
|  | static void sdhci_read_block_pio(struct sdhci_host *host) | 
|  | { | 
|  | unsigned long flags; | 
|  | size_t blksize, len, chunk; | 
|  | u32 uninitialized_var(scratch); | 
|  | u8 *buf; | 
|  |  | 
|  | DBG("PIO reading\n"); | 
|  |  | 
|  | blksize = host->data->blksz; | 
|  | chunk = 0; | 
|  |  | 
|  | local_irq_save(flags); | 
|  |  | 
|  | while (blksize) { | 
|  | if (!sg_miter_next(&host->sg_miter)) | 
|  | BUG(); | 
|  |  | 
|  | len = min(host->sg_miter.length, blksize); | 
|  |  | 
|  | blksize -= len; | 
|  | host->sg_miter.consumed = len; | 
|  |  | 
|  | buf = host->sg_miter.addr; | 
|  |  | 
|  | while (len) { | 
|  | if (chunk == 0) { | 
|  | scratch = sdhci_readl(host, SDHCI_BUFFER); | 
|  | chunk = 4; | 
|  | } | 
|  |  | 
|  | *buf = scratch & 0xFF; | 
|  |  | 
|  | buf++; | 
|  | scratch >>= 8; | 
|  | chunk--; | 
|  | len--; | 
|  | } | 
|  | } | 
|  |  | 
|  | sg_miter_stop(&host->sg_miter); | 
|  |  | 
|  | local_irq_restore(flags); | 
|  | } | 
|  |  | 
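|  | /* | 
|  | * The buffer data port (SDHCI_BUFFER) is a single 32-bit register: | 
|  | * the read path above unpacks four bytes per access, least | 
|  | * significant byte first, and the write path below packs bytes the | 
|  | * same way, so a 512-byte block costs 128 register accesses. | 
|  | */ | 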
|  | static void sdhci_write_block_pio(struct sdhci_host *host) | 
|  | { | 
|  | unsigned long flags; | 
|  | size_t blksize, len, chunk; | 
|  | u32 scratch; | 
|  | u8 *buf; | 
|  |  | 
|  | DBG("PIO writing\n"); | 
|  |  | 
|  | blksize = host->data->blksz; | 
|  | chunk = 0; | 
|  | scratch = 0; | 
|  |  | 
|  | local_irq_save(flags); | 
|  |  | 
|  | while (blksize) { | 
|  | if (!sg_miter_next(&host->sg_miter)) | 
|  | BUG(); | 
|  |  | 
|  | len = min(host->sg_miter.length, blksize); | 
|  |  | 
|  | blksize -= len; | 
|  | host->sg_miter.consumed = len; | 
|  |  | 
|  | buf = host->sg_miter.addr; | 
|  |  | 
|  | while (len) { | 
|  | scratch |= (u32)*buf << (chunk * 8); | 
|  |  | 
|  | buf++; | 
|  | chunk++; | 
|  | len--; | 
|  |  | 
|  | if ((chunk == 4) || ((len == 0) && (blksize == 0))) { | 
|  | sdhci_writel(host, scratch, SDHCI_BUFFER); | 
|  | chunk = 0; | 
|  | scratch = 0; | 
|  | } | 
|  | } | 
|  | } | 
|  |  | 
|  | sg_miter_stop(&host->sg_miter); | 
|  |  | 
|  | local_irq_restore(flags); | 
|  | } | 
|  |  | 
|  | static void sdhci_transfer_pio(struct sdhci_host *host) | 
|  | { | 
|  | u32 mask; | 
|  |  | 
|  | BUG_ON(!host->data); | 
|  |  | 
|  | if (host->blocks == 0) | 
|  | return; | 
|  |  | 
|  | if (host->data->flags & MMC_DATA_READ) | 
|  | mask = SDHCI_DATA_AVAILABLE; | 
|  | else | 
|  | mask = SDHCI_SPACE_AVAILABLE; | 
|  |  | 
|  | /* | 
|  | * Some controllers (JMicron JMB38x) mess up the buffer bits | 
|  | * for transfers < 4 bytes. As long as it is just one block, | 
|  | * we can ignore the bits. | 
|  | */ | 
|  | if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) && | 
|  | (host->data->blocks == 1)) | 
|  | mask = ~0; | 
|  |  | 
|  | while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { | 
|  | if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY) | 
|  | udelay(100); | 
|  |  | 
|  | if (host->data->flags & MMC_DATA_READ) | 
|  | sdhci_read_block_pio(host); | 
|  | else | 
|  | sdhci_write_block_pio(host); | 
|  |  | 
|  | host->blocks--; | 
|  | if (host->blocks == 0) | 
|  | break; | 
|  | } | 
|  |  | 
|  | DBG("PIO transfer complete.\n"); | 
|  | } | 
|  |  | 
|  | static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags) | 
|  | { | 
|  | local_irq_save(*flags); | 
|  | return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; | 
|  | } | 
|  |  | 
|  | static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) | 
|  | { | 
|  | kunmap_atomic(buffer, KM_BIO_SRC_IRQ); | 
|  | local_irq_restore(*flags); | 
|  | } | 
|  |  | 
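|  | /* | 
|  | * ADMA2 descriptors are built by hand below as 8-byte little-endian | 
|  | * records: byte 0 holds the attributes (0x21 = "tran, valid", | 
|  | * 0x03 = "nop, end, valid"), byte 1 is reserved, bytes 2-3 hold the | 
|  | * 16-bit length and bytes 4-7 the 32-bit buffer address. | 
|  | */ | 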
|  | static int sdhci_adma_table_pre(struct sdhci_host *host, | 
|  | struct mmc_data *data) | 
|  | { | 
|  | int direction; | 
|  |  | 
|  | u8 *desc; | 
|  | u8 *align; | 
|  | dma_addr_t addr; | 
|  | dma_addr_t align_addr; | 
|  | int len, offset; | 
|  |  | 
|  | struct scatterlist *sg; | 
|  | int i; | 
|  | char *buffer; | 
|  | unsigned long flags; | 
|  |  | 
|  | /* | 
|  | * The spec does not specify the endianness of the descriptor table. | 
|  | * We currently assume that it is little endian. | 
|  | */ | 
|  |  | 
|  | if (data->flags & MMC_DATA_READ) | 
|  | direction = DMA_FROM_DEVICE; | 
|  | else | 
|  | direction = DMA_TO_DEVICE; | 
|  |  | 
|  | /* | 
|  | * The ADMA descriptor table is mapped further down as we | 
|  | * need to fill it with data first. | 
|  | */ | 
|  |  | 
|  | host->align_addr = dma_map_single(mmc_dev(host->mmc), | 
|  | host->align_buffer, 128 * 4, direction); | 
|  | if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) | 
|  | goto fail; | 
|  | BUG_ON(host->align_addr & 0x3); | 
|  |  | 
|  | host->sg_count = dma_map_sg(mmc_dev(host->mmc), | 
|  | data->sg, data->sg_len, direction); | 
|  | if (host->sg_count == 0) | 
|  | goto unmap_align; | 
|  |  | 
|  | desc = host->adma_desc; | 
|  | align = host->align_buffer; | 
|  |  | 
|  | align_addr = host->align_addr; | 
|  |  | 
|  | for_each_sg(data->sg, sg, host->sg_count, i) { | 
|  | addr = sg_dma_address(sg); | 
|  | len = sg_dma_len(sg); | 
|  |  | 
|  | /* | 
|  | * The SDHCI specification states that ADMA | 
|  | * addresses must be 32-bit aligned. If they | 
|  | * aren't, then we use a bounce buffer for | 
|  | * the (up to three) bytes that screw up the | 
|  | * alignment. | 
|  | */ | 
|  | offset = (4 - (addr & 0x3)) & 0x3; | 
|  | if (offset) { | 
|  | if (data->flags & MMC_DATA_WRITE) { | 
|  | buffer = sdhci_kmap_atomic(sg, &flags); | 
|  | WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3)); | 
|  | memcpy(align, buffer, offset); | 
|  | sdhci_kunmap_atomic(buffer, &flags); | 
|  | } | 
|  |  | 
|  | desc[7] = (align_addr >> 24) & 0xff; | 
|  | desc[6] = (align_addr >> 16) & 0xff; | 
|  | desc[5] = (align_addr >> 8) & 0xff; | 
|  | desc[4] = (align_addr >> 0) & 0xff; | 
|  |  | 
|  | BUG_ON(offset > 65536); | 
|  |  | 
|  | desc[3] = (offset >> 8) & 0xff; | 
|  | desc[2] = (offset >> 0) & 0xff; | 
|  |  | 
|  | desc[1] = 0x00; | 
|  | desc[0] = 0x21; /* tran, valid */ | 
|  |  | 
|  | align += 4; | 
|  | align_addr += 4; | 
|  |  | 
|  | desc += 8; | 
|  |  | 
|  | addr += offset; | 
|  | len -= offset; | 
|  | } | 
|  |  | 
|  | desc[7] = (addr >> 24) & 0xff; | 
|  | desc[6] = (addr >> 16) & 0xff; | 
|  | desc[5] = (addr >> 8) & 0xff; | 
|  | desc[4] = (addr >> 0) & 0xff; | 
|  |  | 
|  | BUG_ON(len > 65536); | 
|  |  | 
|  | desc[3] = (len >> 8) & 0xff; | 
|  | desc[2] = (len >> 0) & 0xff; | 
|  |  | 
|  | desc[1] = 0x00; | 
|  | desc[0] = 0x21; /* tran, valid */ | 
|  |  | 
|  | desc += 8; | 
|  |  | 
|  | /* | 
|  | * If this triggers then we have a calculation bug | 
|  | * somewhere. :/ | 
|  | */ | 
|  | WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4); | 
|  | } | 
|  |  | 
|  | /* | 
|  | * Add a terminating entry. | 
|  | */ | 
|  | desc[7] = 0; | 
|  | desc[6] = 0; | 
|  | desc[5] = 0; | 
|  | desc[4] = 0; | 
|  |  | 
|  | desc[3] = 0; | 
|  | desc[2] = 0; | 
|  |  | 
|  | desc[1] = 0x00; | 
|  | desc[0] = 0x03; /* nop, end, valid */ | 
|  |  | 
|  | /* | 
|  | * Resync align buffer as we might have changed it. | 
|  | */ | 
|  | if (data->flags & MMC_DATA_WRITE) { | 
|  | dma_sync_single_for_device(mmc_dev(host->mmc), | 
|  | host->align_addr, 128 * 4, direction); | 
|  | } | 
|  |  | 
|  | host->adma_addr = dma_map_single(mmc_dev(host->mmc), | 
|  | host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE); | 
|  | if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr)) | 
|  | goto unmap_entries; | 
|  | BUG_ON(host->adma_addr & 0x3); | 
|  |  | 
|  | return 0; | 
|  |  | 
|  | unmap_entries: | 
|  | dma_unmap_sg(mmc_dev(host->mmc), data->sg, | 
|  | data->sg_len, direction); | 
|  | unmap_align: | 
|  | dma_unmap_single(mmc_dev(host->mmc), host->align_addr, | 
|  | 128 * 4, direction); | 
|  | fail: | 
|  | return -EINVAL; | 
|  | } | 
|  |  | 
|  | static void sdhci_adma_table_post(struct sdhci_host *host, | 
|  | struct mmc_data *data) | 
|  | { | 
|  | int direction; | 
|  |  | 
|  | struct scatterlist *sg; | 
|  | int i, size; | 
|  | u8 *align; | 
|  | char *buffer; | 
|  | unsigned long flags; | 
|  |  | 
|  | if (data->flags & MMC_DATA_READ) | 
|  | direction = DMA_FROM_DEVICE; | 
|  | else | 
|  | direction = DMA_TO_DEVICE; | 
|  |  | 
|  | dma_unmap_single(mmc_dev(host->mmc), host->adma_addr, | 
|  | (128 * 2 + 1) * 4, DMA_TO_DEVICE); | 
|  |  | 
|  | dma_unmap_single(mmc_dev(host->mmc), host->align_addr, | 
|  | 128 * 4, direction); | 
|  |  | 
|  | if (data->flags & MMC_DATA_READ) { | 
|  | dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, | 
|  | data->sg_len, direction); | 
|  |  | 
|  | align = host->align_buffer; | 
|  |  | 
|  | for_each_sg(data->sg, sg, host->sg_count, i) { | 
|  | if (sg_dma_address(sg) & 0x3) { | 
|  | size = 4 - (sg_dma_address(sg) & 0x3); | 
|  |  | 
|  | buffer = sdhci_kmap_atomic(sg, &flags); | 
|  | WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3)); | 
|  | memcpy(buffer, align, size); | 
|  | sdhci_kunmap_atomic(buffer, &flags); | 
|  |  | 
|  | align += 4; | 
|  | } | 
|  | } | 
|  | } | 
|  |  | 
|  | dma_unmap_sg(mmc_dev(host->mmc), data->sg, | 
|  | data->sg_len, direction); | 
|  | } | 
|  |  | 
|  | static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) | 
|  | { | 
|  | u8 count; | 
|  | unsigned target_timeout, current_timeout; | 
|  |  | 
|  | /* | 
|  | * If the host controller provides us with an incorrect timeout | 
|  | * value, just skip the check and use 0xE.  The hardware may take | 
|  | * longer to time out, but that's much better than having a too-short | 
|  | * timeout value. | 
|  | */ | 
|  | if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) | 
|  | return 0xE; | 
|  |  | 
|  | /* timeout in us */ | 
|  | target_timeout = data->timeout_ns / 1000 + | 
|  | data->timeout_clks / host->clock; | 
|  |  | 
|  | /* | 
|  | * Figure out needed cycles. | 
|  | * We do this in steps in order to fit inside a 32 bit int. | 
|  | * The first step is the minimum timeout, which will have a | 
|  | * minimum resolution of 6 bits: | 
|  | * (1) 2^13*1000 > 2^22, | 
|  | * (2) host->timeout_clk < 2^16 | 
|  | *     => | 
|  | *     (1) / (2) > 2^6 | 
|  | */ | 
|  | count = 0; | 
|  | current_timeout = (1 << 13) * 1000 / host->timeout_clk; | 
|  | while (current_timeout < target_timeout) { | 
|  | count++; | 
|  | current_timeout <<= 1; | 
|  | if (count >= 0xF) | 
|  | break; | 
|  | } | 
|  |  | 
|  | if (count >= 0xF) { | 
|  | printk(KERN_WARNING "%s: Too large timeout requested!\n", | 
|  | mmc_hostname(host->mmc)); | 
|  | count = 0xE; | 
|  | } | 
|  |  | 
|  | return count; | 
|  | } | 
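|  | /* | 
|  | * Worked example with made-up numbers: with host->timeout_clk at | 
|  | * 50000 kHz the base timeout is (1 << 13) * 1000 / 50000 = 163 us. | 
|  | * A request asking for 100 ms (target_timeout = 100000 us) needs | 
|  | * ten doublings, so sdhci_calc_timeout() returns 10 and the | 
|  | * controller counts 2^(13 + 10) timeout clocks (roughly 168 ms) | 
|  | * before flagging a data timeout. | 
|  | */ | 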
|  |  | 
|  | static void sdhci_set_transfer_irqs(struct sdhci_host *host) | 
|  | { | 
|  | u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL; | 
|  | u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR; | 
|  |  | 
|  | if (host->flags & SDHCI_REQ_USE_DMA) | 
|  | sdhci_clear_set_irqs(host, pio_irqs, dma_irqs); | 
|  | else | 
|  | sdhci_clear_set_irqs(host, dma_irqs, pio_irqs); | 
|  | } | 
|  |  | 
|  | static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) | 
|  | { | 
|  | u8 count; | 
|  | u8 ctrl; | 
|  | int ret; | 
|  |  | 
|  | WARN_ON(host->data); | 
|  |  | 
|  | if (data == NULL) | 
|  | return; | 
|  |  | 
|  | /* Sanity checks */ | 
|  | BUG_ON(data->blksz * data->blocks > 524288); | 
|  | BUG_ON(data->blksz > host->mmc->max_blk_size); | 
|  | BUG_ON(data->blocks > 65535); | 
|  |  | 
|  | host->data = data; | 
|  | host->data_early = 0; | 
|  |  | 
|  | count = sdhci_calc_timeout(host, data); | 
|  | sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); | 
|  |  | 
|  | if (host->flags & SDHCI_USE_DMA) | 
|  | host->flags |= SDHCI_REQ_USE_DMA; | 
|  |  | 
|  | /* | 
|  | * FIXME: This doesn't account for merging when mapping the | 
|  | * scatterlist. | 
|  | */ | 
|  | if (host->flags & SDHCI_REQ_USE_DMA) { | 
|  | int broken, i; | 
|  | struct scatterlist *sg; | 
|  |  | 
|  | broken = 0; | 
|  | if (host->flags & SDHCI_USE_ADMA) { | 
|  | if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) | 
|  | broken = 1; | 
|  | } else { | 
|  | if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) | 
|  | broken = 1; | 
|  | } | 
|  |  | 
|  | if (unlikely(broken)) { | 
|  | for_each_sg(data->sg, sg, data->sg_len, i) { | 
|  | if (sg->length & 0x3) { | 
|  | DBG("Reverting to PIO because of " | 
|  | "transfer size (%d)\n", | 
|  | sg->length); | 
|  | host->flags &= ~SDHCI_REQ_USE_DMA; | 
|  | break; | 
|  | } | 
|  | } | 
|  | } | 
|  | } | 
|  |  | 
|  | /* | 
|  | * The assumption here is that the alignment is the same after | 
|  | * translation to the device address space. | 
|  | */ | 
|  | if (host->flags & SDHCI_REQ_USE_DMA) { | 
|  | int broken, i; | 
|  | struct scatterlist *sg; | 
|  |  | 
|  | broken = 0; | 
|  | if (host->flags & SDHCI_USE_ADMA) { | 
|  | /* | 
|  | * As we use 3 byte chunks to work around | 
|  | * alignment problems, we need to check this | 
|  | * quirk. | 
|  | */ | 
|  | if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) | 
|  | broken = 1; | 
|  | } else { | 
|  | if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) | 
|  | broken = 1; | 
|  | } | 
|  |  | 
|  | if (unlikely(broken)) { | 
|  | for_each_sg(data->sg, sg, data->sg_len, i) { | 
|  | if (sg->offset & 0x3) { | 
|  | DBG("Reverting to PIO because of " | 
|  | "bad alignment\n"); | 
|  | host->flags &= ~SDHCI_REQ_USE_DMA; | 
|  | break; | 
|  | } | 
|  | } | 
|  | } | 
|  | } | 
|  |  | 
|  | if (host->flags & SDHCI_REQ_USE_DMA) { | 
|  | if (host->flags & SDHCI_USE_ADMA) { | 
|  | ret = sdhci_adma_table_pre(host, data); | 
|  | if (ret) { | 
|  | /* | 
|  | * This only happens when someone fed | 
|  | * us an invalid request. | 
|  | */ | 
|  | WARN_ON(1); | 
|  | host->flags &= ~SDHCI_REQ_USE_DMA; | 
|  | } else { | 
|  | sdhci_writel(host, host->adma_addr, | 
|  | SDHCI_ADMA_ADDRESS); | 
|  | } | 
|  | } else { | 
|  | int sg_cnt; | 
|  |  | 
|  | sg_cnt = dma_map_sg(mmc_dev(host->mmc), | 
|  | data->sg, data->sg_len, | 
|  | (data->flags & MMC_DATA_READ) ? | 
|  | DMA_FROM_DEVICE : | 
|  | DMA_TO_DEVICE); | 
|  | if (sg_cnt == 0) { | 
|  | /* | 
|  | * This only happens when someone fed | 
|  | * us an invalid request. | 
|  | */ | 
|  | WARN_ON(1); | 
|  | host->flags &= ~SDHCI_REQ_USE_DMA; | 
|  | } else { | 
|  | WARN_ON(sg_cnt != 1); | 
|  | sdhci_writel(host, sg_dma_address(data->sg), | 
|  | SDHCI_DMA_ADDRESS); | 
|  | } | 
|  | } | 
|  | } | 
|  |  | 
|  | /* | 
|  | * Always adjust the DMA selection as some controllers | 
|  | * (e.g. JMicron) can't do PIO properly when the selection | 
|  | * is ADMA. | 
|  | */ | 
|  | if (host->version >= SDHCI_SPEC_200) { | 
|  | ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); | 
|  | ctrl &= ~SDHCI_CTRL_DMA_MASK; | 
|  | if ((host->flags & SDHCI_REQ_USE_DMA) && | 
|  | (host->flags & SDHCI_USE_ADMA)) | 
|  | ctrl |= SDHCI_CTRL_ADMA32; | 
|  | else | 
|  | ctrl |= SDHCI_CTRL_SDMA; | 
|  | sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); | 
|  | } | 
|  |  | 
|  | if (!(host->flags & SDHCI_REQ_USE_DMA)) { | 
|  | sg_miter_start(&host->sg_miter, | 
|  | data->sg, data->sg_len, SG_MITER_ATOMIC); | 
|  | host->blocks = data->blocks; | 
|  | } | 
|  |  | 
|  | sdhci_set_transfer_irqs(host); | 
|  |  | 
|  | /* We do not handle DMA boundaries, so set it to max (512 KiB) */ | 
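|  | /* | 
|  | * SDHCI_MAKE_BLKSZ(7, blksz) (see sdhci.h) packs the SDMA buffer | 
|  | * boundary code into bits 14:12 and the block size into bits 11:0; | 
|  | * boundary code 7 selects the largest (512 KiB) boundary. | 
|  | */ | 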
|  | sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz), SDHCI_BLOCK_SIZE); | 
|  | sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); | 
|  | } | 
|  |  | 
|  | static void sdhci_set_transfer_mode(struct sdhci_host *host, | 
|  | struct mmc_data *data) | 
|  | { | 
|  | u16 mode; | 
|  |  | 
|  | if (data == NULL) | 
|  | return; | 
|  |  | 
|  | WARN_ON(!host->data); | 
|  |  | 
|  | mode = SDHCI_TRNS_BLK_CNT_EN; | 
|  | if (data->blocks > 1) | 
|  | mode |= SDHCI_TRNS_MULTI; | 
|  | if (data->flags & MMC_DATA_READ) | 
|  | mode |= SDHCI_TRNS_READ; | 
|  | if (host->flags & SDHCI_REQ_USE_DMA) | 
|  | mode |= SDHCI_TRNS_DMA; | 
|  |  | 
|  | sdhci_writew(host, mode, SDHCI_TRANSFER_MODE); | 
|  | } | 
|  |  | 
|  | static void sdhci_finish_data(struct sdhci_host *host) | 
|  | { | 
|  | struct mmc_data *data; | 
|  |  | 
|  | BUG_ON(!host->data); | 
|  |  | 
|  | data = host->data; | 
|  | host->data = NULL; | 
|  |  | 
|  | if (host->flags & SDHCI_REQ_USE_DMA) { | 
|  | if (host->flags & SDHCI_USE_ADMA) | 
|  | sdhci_adma_table_post(host, data); | 
|  | else { | 
|  | dma_unmap_sg(mmc_dev(host->mmc), data->sg, | 
|  | data->sg_len, (data->flags & MMC_DATA_READ) ? | 
|  | DMA_FROM_DEVICE : DMA_TO_DEVICE); | 
|  | } | 
|  | } | 
|  |  | 
|  | /* | 
|  | * The specification states that the block count register must | 
|  | * be updated, but it does not specify at what point in the | 
|  | * data flow. That makes the register entirely useless to read | 
|  | * back so we have to assume that nothing made it to the card | 
|  | * in the event of an error. | 
|  | */ | 
|  | if (data->error) | 
|  | data->bytes_xfered = 0; | 
|  | else | 
|  | data->bytes_xfered = data->blksz * data->blocks; | 
|  |  | 
|  | if (data->stop) { | 
|  | /* | 
|  | * The controller needs a reset of internal state machines | 
|  | * upon error conditions. | 
|  | */ | 
|  | if (data->error) { | 
|  | sdhci_reset(host, SDHCI_RESET_CMD); | 
|  | sdhci_reset(host, SDHCI_RESET_DATA); | 
|  | } | 
|  |  | 
|  | sdhci_send_command(host, data->stop); | 
|  | } else | 
|  | tasklet_schedule(&host->finish_tasklet); | 
|  | } | 
|  |  | 
|  | static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) | 
|  | { | 
|  | int flags; | 
|  | u32 mask; | 
|  | unsigned long timeout; | 
|  |  | 
|  | WARN_ON(host->cmd); | 
|  |  | 
|  | /* Wait max 10 ms */ | 
|  | timeout = 10; | 
|  |  | 
|  | mask = SDHCI_CMD_INHIBIT; | 
|  | if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY)) | 
|  | mask |= SDHCI_DATA_INHIBIT; | 
|  |  | 
|  | /* We shouldn't wait for data inhibit for stop commands, even | 
|  | though they might use busy signaling */ | 
|  | if (host->mrq->data && (cmd == host->mrq->data->stop)) | 
|  | mask &= ~SDHCI_DATA_INHIBIT; | 
|  |  | 
|  | while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { | 
|  | if (timeout == 0) { | 
|  | printk(KERN_ERR "%s: Controller never released " | 
|  | "inhibit bit(s).\n", mmc_hostname(host->mmc)); | 
|  | sdhci_dumpregs(host); | 
|  | cmd->error = -EIO; | 
|  | tasklet_schedule(&host->finish_tasklet); | 
|  | return; | 
|  | } | 
|  | timeout--; | 
|  | mdelay(1); | 
|  | } | 
|  |  | 
|  | mod_timer(&host->timer, jiffies + 10 * HZ); | 
|  |  | 
|  | host->cmd = cmd; | 
|  |  | 
|  | sdhci_prepare_data(host, cmd->data); | 
|  |  | 
|  | sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); | 
|  |  | 
|  | sdhci_set_transfer_mode(host, cmd->data); | 
|  |  | 
|  | if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { | 
|  | printk(KERN_ERR "%s: Unsupported response type!\n", | 
|  | mmc_hostname(host->mmc)); | 
|  | cmd->error = -EINVAL; | 
|  | tasklet_schedule(&host->finish_tasklet); | 
|  | return; | 
|  | } | 
|  |  | 
|  | if (!(cmd->flags & MMC_RSP_PRESENT)) | 
|  | flags = SDHCI_CMD_RESP_NONE; | 
|  | else if (cmd->flags & MMC_RSP_136) | 
|  | flags = SDHCI_CMD_RESP_LONG; | 
|  | else if (cmd->flags & MMC_RSP_BUSY) | 
|  | flags = SDHCI_CMD_RESP_SHORT_BUSY; | 
|  | else | 
|  | flags = SDHCI_CMD_RESP_SHORT; | 
|  |  | 
|  | if (cmd->flags & MMC_RSP_CRC) | 
|  | flags |= SDHCI_CMD_CRC; | 
|  | if (cmd->flags & MMC_RSP_OPCODE) | 
|  | flags |= SDHCI_CMD_INDEX; | 
|  | if (cmd->data) | 
|  | flags |= SDHCI_CMD_DATA; | 
|  |  | 
|  | sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); | 
|  | } | 
|  |  | 
|  | static void sdhci_finish_command(struct sdhci_host *host) | 
|  | { | 
|  | int i; | 
|  |  | 
|  | BUG_ON(host->cmd == NULL); | 
|  |  | 
|  | if (host->cmd->flags & MMC_RSP_PRESENT) { | 
|  | if (host->cmd->flags & MMC_RSP_136) { | 
|  | /* CRC is stripped so we need to do some shifting. */ | 
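|  | /* | 
|  | * The controller drops the CRC byte of a 136-bit response, so the | 
|  | * 120 useful bits sit one byte "low" in the response registers. | 
|  | * Each 32-bit word is shifted left by 8 and topped up with the high | 
|  | * byte of the next lower register, leaving the stripped CRC position | 
|  | * in resp[3] as zero, which is the layout the MMC core expects. | 
|  | */ | 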
|  | for (i = 0; i < 4; i++) { | 
|  | host->cmd->resp[i] = sdhci_readl(host, | 
|  | SDHCI_RESPONSE + (3-i)*4) << 8; | 
|  | if (i != 3) | 
|  | host->cmd->resp[i] |= | 
|  | sdhci_readb(host, | 
|  | SDHCI_RESPONSE + (3-i)*4-1); | 
|  | } | 
|  | } else { | 
|  | host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE); | 
|  | } | 
|  | } | 
|  |  | 
|  | host->cmd->error = 0; | 
|  |  | 
|  | if (host->data && host->data_early) | 
|  | sdhci_finish_data(host); | 
|  |  | 
|  | if (!host->cmd->data) | 
|  | tasklet_schedule(&host->finish_tasklet); | 
|  |  | 
|  | host->cmd = NULL; | 
|  | } | 
|  |  | 
|  | static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) | 
|  | { | 
|  | int div; | 
|  | u16 clk; | 
|  | unsigned long timeout; | 
|  |  | 
|  | if (clock == host->clock) | 
|  | return; | 
|  |  | 
|  | if (host->ops->set_clock) { | 
|  | host->ops->set_clock(host, clock); | 
|  | if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) | 
|  | return; | 
|  | } | 
|  |  | 
|  | sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); | 
|  |  | 
|  | if (clock == 0) | 
|  | goto out; | 
|  |  | 
|  | for (div = 1; div < 256; div *= 2) { | 
|  | if ((host->max_clk / div) <= clock) | 
|  | break; | 
|  | } | 
|  | div >>= 1; | 
|  |  | 
|  | clk = div << SDHCI_DIVIDER_SHIFT; | 
|  | clk |= SDHCI_CLOCK_INT_EN; | 
|  | sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); | 
|  |  | 
|  | /* Wait max 10 ms */ | 
|  | timeout = 10; | 
|  | while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) | 
|  | & SDHCI_CLOCK_INT_STABLE)) { | 
|  | if (timeout == 0) { | 
|  | printk(KERN_ERR "%s: Internal clock never " | 
|  | "stabilised.\n", mmc_hostname(host->mmc)); | 
|  | sdhci_dumpregs(host); | 
|  | return; | 
|  | } | 
|  | timeout--; | 
|  | mdelay(1); | 
|  | } | 
|  |  | 
|  | clk |= SDHCI_CLOCK_CARD_EN; | 
|  | sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); | 
|  |  | 
|  | out: | 
|  | host->clock = clock; | 
|  | } | 
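|  | /* | 
|  | * Divider example with a hypothetical 50 MHz base clock: a request | 
|  | * for 400 kHz walks div = 1, 2, ... until 50 MHz / 128 <= 400 kHz, | 
|  | * then programs 128 / 2 = 64 into the divider field. The SDHCI spec | 
|  | * defines SDCLK = base / (2 * N) for a programmed value N, giving | 
|  | * roughly 390 kHz here; the largest programmable value (128) is also | 
|  | * why sdhci_add_host() sets f_min = max_clk / 256. | 
|  | */ | 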
|  |  | 
|  | static void sdhci_set_power(struct sdhci_host *host, unsigned short power) | 
|  | { | 
|  | u8 pwr; | 
|  |  | 
|  | if (power == (unsigned short)-1) | 
|  | pwr = 0; | 
|  | else { | 
|  | switch (1 << power) { | 
|  | case MMC_VDD_165_195: | 
|  | pwr = SDHCI_POWER_180; | 
|  | break; | 
|  | case MMC_VDD_29_30: | 
|  | case MMC_VDD_30_31: | 
|  | pwr = SDHCI_POWER_300; | 
|  | break; | 
|  | case MMC_VDD_32_33: | 
|  | case MMC_VDD_33_34: | 
|  | pwr = SDHCI_POWER_330; | 
|  | break; | 
|  | default: | 
|  | BUG(); | 
|  | } | 
|  | } | 
|  |  | 
|  | if (host->pwr == pwr) | 
|  | return; | 
|  |  | 
|  | host->pwr = pwr; | 
|  |  | 
|  | if (pwr == 0) { | 
|  | sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); | 
|  | return; | 
|  | } | 
|  |  | 
|  | /* | 
|  | * Spec says that we should clear the power reg before setting | 
|  | * a new value. Some controllers don't seem to like this though. | 
|  | */ | 
|  | if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) | 
|  | sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); | 
|  |  | 
|  | /* | 
|  | * At least the Marvell CaFe chip gets confused if we set the voltage | 
|  | * and turn on the power at the same time, so set the voltage first. | 
|  | */ | 
|  | if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) | 
|  | sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); | 
|  |  | 
|  | pwr |= SDHCI_POWER_ON; | 
|  |  | 
|  | sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); | 
|  |  | 
|  | /* | 
|  | * Some controllers need an extra 10ms delay after applying power | 
|  | * before they can apply the clock. | 
|  | */ | 
|  | if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) | 
|  | mdelay(10); | 
|  | } | 
|  |  | 
|  | /*****************************************************************************\ | 
|  | *                                                                           * | 
|  | * MMC callbacks                                                             * | 
|  | *                                                                           * | 
|  | \*****************************************************************************/ | 
|  |  | 
|  | static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) | 
|  | { | 
|  | struct sdhci_host *host; | 
|  | bool present; | 
|  | unsigned long flags; | 
|  |  | 
|  | host = mmc_priv(mmc); | 
|  |  | 
|  | spin_lock_irqsave(&host->lock, flags); | 
|  |  | 
|  | WARN_ON(host->mrq != NULL); | 
|  |  | 
|  | #ifndef SDHCI_USE_LEDS_CLASS | 
|  | sdhci_activate_led(host); | 
|  | #endif | 
|  |  | 
|  | host->mrq = mrq; | 
|  |  | 
|  | /* If polling, assume that the card is always present. */ | 
|  | if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) | 
|  | present = true; | 
|  | else | 
|  | present = sdhci_readl(host, SDHCI_PRESENT_STATE) & | 
|  | SDHCI_CARD_PRESENT; | 
|  |  | 
|  | if (!present || host->flags & SDHCI_DEVICE_DEAD) { | 
|  | host->mrq->cmd->error = -ENOMEDIUM; | 
|  | tasklet_schedule(&host->finish_tasklet); | 
|  | } else | 
|  | sdhci_send_command(host, mrq->cmd); | 
|  |  | 
|  | mmiowb(); | 
|  | spin_unlock_irqrestore(&host->lock, flags); | 
|  | } | 
|  |  | 
|  | static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | 
|  | { | 
|  | struct sdhci_host *host; | 
|  | unsigned long flags; | 
|  | u8 ctrl; | 
|  |  | 
|  | host = mmc_priv(mmc); | 
|  |  | 
|  | spin_lock_irqsave(&host->lock, flags); | 
|  |  | 
|  | if (host->flags & SDHCI_DEVICE_DEAD) | 
|  | goto out; | 
|  |  | 
|  | /* | 
|  | * Reset the chip on each power off. | 
|  | * Should clear out any weird states. | 
|  | */ | 
|  | if (ios->power_mode == MMC_POWER_OFF) { | 
|  | sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); | 
|  | sdhci_reinit(host); | 
|  | } | 
|  |  | 
|  | sdhci_set_clock(host, ios->clock); | 
|  |  | 
|  | if (ios->power_mode == MMC_POWER_OFF) | 
|  | sdhci_set_power(host, -1); | 
|  | else | 
|  | sdhci_set_power(host, ios->vdd); | 
|  |  | 
|  | ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); | 
|  |  | 
|  | if (ios->bus_width == MMC_BUS_WIDTH_4) | 
|  | ctrl |= SDHCI_CTRL_4BITBUS; | 
|  | else | 
|  | ctrl &= ~SDHCI_CTRL_4BITBUS; | 
|  |  | 
|  | if (ios->timing == MMC_TIMING_SD_HS) | 
|  | ctrl |= SDHCI_CTRL_HISPD; | 
|  | else | 
|  | ctrl &= ~SDHCI_CTRL_HISPD; | 
|  |  | 
|  | sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); | 
|  |  | 
|  | /* | 
|  | * Some (ENE) controllers misbehave on some ios operations, | 
|  | * signalling timeout and CRC errors even on CMD0. Resetting | 
|  | * it on each ios seems to solve the problem. | 
|  | */ | 
|  | if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) | 
|  | sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); | 
|  |  | 
|  | out: | 
|  | mmiowb(); | 
|  | spin_unlock_irqrestore(&host->lock, flags); | 
|  | } | 
|  |  | 
|  | static int sdhci_get_ro(struct mmc_host *mmc) | 
|  | { | 
|  | struct sdhci_host *host; | 
|  | unsigned long flags; | 
|  | int present; | 
|  |  | 
|  | host = mmc_priv(mmc); | 
|  |  | 
|  | spin_lock_irqsave(&host->lock, flags); | 
|  |  | 
|  | if (host->flags & SDHCI_DEVICE_DEAD) | 
|  | present = 0; | 
|  | else | 
|  | present = sdhci_readl(host, SDHCI_PRESENT_STATE); | 
|  |  | 
|  | spin_unlock_irqrestore(&host->lock, flags); | 
|  |  | 
|  | if (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT) | 
|  | return !!(present & SDHCI_WRITE_PROTECT); | 
|  | return !(present & SDHCI_WRITE_PROTECT); | 
|  | } | 
|  |  | 
|  | static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) | 
|  | { | 
|  | struct sdhci_host *host; | 
|  | unsigned long flags; | 
|  |  | 
|  | host = mmc_priv(mmc); | 
|  |  | 
|  | spin_lock_irqsave(&host->lock, flags); | 
|  |  | 
|  | if (host->flags & SDHCI_DEVICE_DEAD) | 
|  | goto out; | 
|  |  | 
|  | if (enable) | 
|  | sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT); | 
|  | else | 
|  | sdhci_mask_irqs(host, SDHCI_INT_CARD_INT); | 
|  | out: | 
|  | mmiowb(); | 
|  |  | 
|  | spin_unlock_irqrestore(&host->lock, flags); | 
|  | } | 
|  |  | 
|  | static const struct mmc_host_ops sdhci_ops = { | 
|  | .request	= sdhci_request, | 
|  | .set_ios	= sdhci_set_ios, | 
|  | .get_ro		= sdhci_get_ro, | 
|  | .enable_sdio_irq = sdhci_enable_sdio_irq, | 
|  | }; | 
|  |  | 
|  | /*****************************************************************************\ | 
|  | *                                                                           * | 
|  | * Tasklets                                                                  * | 
|  | *                                                                           * | 
|  | \*****************************************************************************/ | 
|  |  | 
|  | static void sdhci_tasklet_card(unsigned long param) | 
|  | { | 
|  | struct sdhci_host *host; | 
|  | unsigned long flags; | 
|  |  | 
|  | host = (struct sdhci_host*)param; | 
|  |  | 
|  | spin_lock_irqsave(&host->lock, flags); | 
|  |  | 
|  | if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) { | 
|  | if (host->mrq) { | 
|  | printk(KERN_ERR "%s: Card removed during transfer!\n", | 
|  | mmc_hostname(host->mmc)); | 
|  | printk(KERN_ERR "%s: Resetting controller.\n", | 
|  | mmc_hostname(host->mmc)); | 
|  |  | 
|  | sdhci_reset(host, SDHCI_RESET_CMD); | 
|  | sdhci_reset(host, SDHCI_RESET_DATA); | 
|  |  | 
|  | host->mrq->cmd->error = -ENOMEDIUM; | 
|  | tasklet_schedule(&host->finish_tasklet); | 
|  | } | 
|  | } | 
|  |  | 
|  | spin_unlock_irqrestore(&host->lock, flags); | 
|  |  | 
|  | mmc_detect_change(host->mmc, msecs_to_jiffies(200)); | 
|  | } | 
|  |  | 
|  | static void sdhci_tasklet_finish(unsigned long param) | 
|  | { | 
|  | struct sdhci_host *host; | 
|  | unsigned long flags; | 
|  | struct mmc_request *mrq; | 
|  |  | 
|  | host = (struct sdhci_host*)param; | 
|  |  | 
|  | spin_lock_irqsave(&host->lock, flags); | 
|  |  | 
|  | del_timer(&host->timer); | 
|  |  | 
|  | mrq = host->mrq; | 
|  |  | 
|  | /* | 
|  | * The controller needs a reset of internal state machines | 
|  | * upon error conditions. | 
|  | */ | 
|  | if (!(host->flags & SDHCI_DEVICE_DEAD) && | 
|  | (mrq->cmd->error || | 
|  | (mrq->data && (mrq->data->error || | 
|  | (mrq->data->stop && mrq->data->stop->error))) || | 
|  | (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { | 
|  |  | 
|  | /* Some controllers need this kick or reset won't work here */ | 
|  | if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) { | 
|  | unsigned int clock; | 
|  |  | 
|  | /* This is to force an update */ | 
|  | clock = host->clock; | 
|  | host->clock = 0; | 
|  | sdhci_set_clock(host, clock); | 
|  | } | 
|  |  | 
|  | /* Spec says we should do both at the same time, but Ricoh | 
|  | controllers do not like that. */ | 
|  | sdhci_reset(host, SDHCI_RESET_CMD); | 
|  | sdhci_reset(host, SDHCI_RESET_DATA); | 
|  | } | 
|  |  | 
|  | host->mrq = NULL; | 
|  | host->cmd = NULL; | 
|  | host->data = NULL; | 
|  |  | 
|  | #ifndef SDHCI_USE_LEDS_CLASS | 
|  | sdhci_deactivate_led(host); | 
|  | #endif | 
|  |  | 
|  | mmiowb(); | 
|  | spin_unlock_irqrestore(&host->lock, flags); | 
|  |  | 
|  | mmc_request_done(host->mmc, mrq); | 
|  | } | 
|  |  | 
|  | static void sdhci_timeout_timer(unsigned long data) | 
|  | { | 
|  | struct sdhci_host *host; | 
|  | unsigned long flags; | 
|  |  | 
|  | host = (struct sdhci_host*)data; | 
|  |  | 
|  | spin_lock_irqsave(&host->lock, flags); | 
|  |  | 
|  | if (host->mrq) { | 
|  | printk(KERN_ERR "%s: Timeout waiting for hardware " | 
|  | "interrupt.\n", mmc_hostname(host->mmc)); | 
|  | sdhci_dumpregs(host); | 
|  |  | 
|  | if (host->data) { | 
|  | host->data->error = -ETIMEDOUT; | 
|  | sdhci_finish_data(host); | 
|  | } else { | 
|  | if (host->cmd) | 
|  | host->cmd->error = -ETIMEDOUT; | 
|  | else | 
|  | host->mrq->cmd->error = -ETIMEDOUT; | 
|  |  | 
|  | tasklet_schedule(&host->finish_tasklet); | 
|  | } | 
|  | } | 
|  |  | 
|  | mmiowb(); | 
|  | spin_unlock_irqrestore(&host->lock, flags); | 
|  | } | 
|  |  | 
|  | /*****************************************************************************\ | 
|  | *                                                                           * | 
|  | * Interrupt handling                                                        * | 
|  | *                                                                           * | 
|  | \*****************************************************************************/ | 
|  |  | 
|  | static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask) | 
|  | { | 
|  | BUG_ON(intmask == 0); | 
|  |  | 
|  | if (!host->cmd) { | 
|  | printk(KERN_ERR "%s: Got command interrupt 0x%08x even " | 
|  | "though no command operation was in progress.\n", | 
|  | mmc_hostname(host->mmc), (unsigned)intmask); | 
|  | sdhci_dumpregs(host); | 
|  | return; | 
|  | } | 
|  |  | 
|  | if (intmask & SDHCI_INT_TIMEOUT) | 
|  | host->cmd->error = -ETIMEDOUT; | 
|  | else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT | | 
|  | SDHCI_INT_INDEX)) | 
|  | host->cmd->error = -EILSEQ; | 
|  |  | 
|  | if (host->cmd->error) { | 
|  | tasklet_schedule(&host->finish_tasklet); | 
|  | return; | 
|  | } | 
|  |  | 
|  | /* | 
|  | * The host can send an interrupt when the busy state has | 
|  | * ended, allowing us to wait without wasting CPU cycles. | 
|  | * Unfortunately this is overloaded on the "data complete" | 
|  | * interrupt, so we need to take some care when handling | 
|  | * it. | 
|  | * | 
|  | * Note: The 1.0 specification is a bit ambiguous about this | 
|  | *       feature so there might be some problems with older | 
|  | *       controllers. | 
|  | */ | 
|  | if (host->cmd->flags & MMC_RSP_BUSY) { | 
|  | if (host->cmd->data) | 
|  | DBG("Cannot wait for busy signal when also " | 
|  | "doing a data transfer"); | 
|  | else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)) | 
|  | return; | 
|  |  | 
|  | /* The controller does not support the end-of-busy IRQ, | 
|  | * fall through and take the SDHCI_INT_RESPONSE */ | 
|  | } | 
|  |  | 
|  | if (intmask & SDHCI_INT_RESPONSE) | 
|  | sdhci_finish_command(host); | 
|  | } | 
|  |  | 
|  | #ifdef DEBUG | 
|  | static void sdhci_show_adma_error(struct sdhci_host *host) | 
|  | { | 
|  | const char *name = mmc_hostname(host->mmc); | 
|  | u8 *desc = host->adma_desc; | 
|  | __le32 *dma; | 
|  | __le16 *len; | 
|  | u8 attr; | 
|  |  | 
|  | sdhci_dumpregs(host); | 
|  |  | 
|  | while (true) { | 
|  | dma = (__le32 *)(desc + 4); | 
|  | len = (__le16 *)(desc + 2); | 
|  | attr = *desc; | 
|  |  | 
|  | DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", | 
|  | name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr); | 
|  |  | 
|  | desc += 8; | 
|  |  | 
|  | if (attr & 2) | 
|  | break; | 
|  | } | 
|  | } | 
|  | #else | 
|  | static void sdhci_show_adma_error(struct sdhci_host *host) { } | 
|  | #endif | 
|  |  | 
|  | static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) | 
|  | { | 
|  | BUG_ON(intmask == 0); | 
|  |  | 
|  | if (!host->data) { | 
|  | /* | 
|  | * The "data complete" interrupt is also used to | 
|  | * indicate that a busy state has ended. See comment | 
|  | * above in sdhci_cmd_irq(). | 
|  | */ | 
|  | if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) { | 
|  | if (intmask & SDHCI_INT_DATA_END) { | 
|  | sdhci_finish_command(host); | 
|  | return; | 
|  | } | 
|  | } | 
|  |  | 
|  | printk(KERN_ERR "%s: Got data interrupt 0x%08x even " | 
|  | "though no data operation was in progress.\n", | 
|  | mmc_hostname(host->mmc), (unsigned)intmask); | 
|  | sdhci_dumpregs(host); | 
|  |  | 
|  | return; | 
|  | } | 
|  |  | 
|  | if (intmask & SDHCI_INT_DATA_TIMEOUT) | 
|  | host->data->error = -ETIMEDOUT; | 
|  | else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) | 
|  | host->data->error = -EILSEQ; | 
|  | else if (intmask & SDHCI_INT_ADMA_ERROR) { | 
|  | printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc)); | 
|  | sdhci_show_adma_error(host); | 
|  | host->data->error = -EIO; | 
|  | } | 
|  |  | 
|  | if (host->data->error) | 
|  | sdhci_finish_data(host); | 
|  | else { | 
|  | if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) | 
|  | sdhci_transfer_pio(host); | 
|  |  | 
|  | /* | 
|  | * We currently don't do anything fancy with DMA | 
|  | * boundaries, but as we can't disable the feature | 
|  | * we need to at least restart the transfer. | 
|  | */ | 
|  | if (intmask & SDHCI_INT_DMA_END) | 
|  | sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS), | 
|  | SDHCI_DMA_ADDRESS); | 
|  |  | 
|  | if (intmask & SDHCI_INT_DATA_END) { | 
|  | if (host->cmd) { | 
|  | /* | 
|  | * Data managed to finish before the | 
|  | * command completed. Make sure we do | 
|  | * things in the proper order. | 
|  | */ | 
|  | host->data_early = 1; | 
|  | } else { | 
|  | sdhci_finish_data(host); | 
|  | } | 
|  | } | 
|  | } | 
|  | } | 
|  |  | 
|  | static irqreturn_t sdhci_irq(int irq, void *dev_id) | 
|  | { | 
|  | irqreturn_t result; | 
|  | struct sdhci_host* host = dev_id; | 
|  | u32 intmask; | 
|  | int cardint = 0; | 
|  |  | 
|  | spin_lock(&host->lock); | 
|  |  | 
|  | intmask = sdhci_readl(host, SDHCI_INT_STATUS); | 
|  |  | 
|  | if (!intmask || intmask == 0xffffffff) { | 
|  | result = IRQ_NONE; | 
|  | goto out; | 
|  | } | 
|  |  | 
|  | DBG("*** %s got interrupt: 0x%08x\n", | 
|  | mmc_hostname(host->mmc), intmask); | 
|  |  | 
|  | if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { | 
|  | sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | | 
|  | SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); | 
|  | tasklet_schedule(&host->card_tasklet); | 
|  | } | 
|  |  | 
|  | intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE); | 
|  |  | 
|  | if (intmask & SDHCI_INT_CMD_MASK) { | 
|  | sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, | 
|  | SDHCI_INT_STATUS); | 
|  | sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK); | 
|  | } | 
|  |  | 
|  | if (intmask & SDHCI_INT_DATA_MASK) { | 
|  | sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK, | 
|  | SDHCI_INT_STATUS); | 
|  | sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); | 
|  | } | 
|  |  | 
|  | intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK); | 
|  |  | 
|  | intmask &= ~SDHCI_INT_ERROR; | 
|  |  | 
|  | if (intmask & SDHCI_INT_BUS_POWER) { | 
|  | printk(KERN_ERR "%s: Card is consuming too much power!\n", | 
|  | mmc_hostname(host->mmc)); | 
|  | sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS); | 
|  | } | 
|  |  | 
|  | intmask &= ~SDHCI_INT_BUS_POWER; | 
|  |  | 
|  | if (intmask & SDHCI_INT_CARD_INT) | 
|  | cardint = 1; | 
|  |  | 
|  | intmask &= ~SDHCI_INT_CARD_INT; | 
|  |  | 
|  | if (intmask) { | 
|  | printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n", | 
|  | mmc_hostname(host->mmc), intmask); | 
|  | sdhci_dumpregs(host); | 
|  |  | 
|  | sdhci_writel(host, intmask, SDHCI_INT_STATUS); | 
|  | } | 
|  |  | 
|  | result = IRQ_HANDLED; | 
|  |  | 
|  | mmiowb(); | 
|  | out: | 
|  | spin_unlock(&host->lock); | 
|  |  | 
|  | /* | 
|  | * We have to delay this as it calls back into the driver. | 
|  | */ | 
|  | if (cardint) | 
|  | mmc_signal_sdio_irq(host->mmc); | 
|  |  | 
|  | return result; | 
|  | } | 
|  |  | 
|  | /*****************************************************************************\ | 
|  | *                                                                           * | 
|  | * Suspend/resume                                                            * | 
|  | *                                                                           * | 
|  | \*****************************************************************************/ | 
|  |  | 
|  | #ifdef CONFIG_PM | 
|  |  | 
|  | int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state) | 
|  | { | 
|  | int ret; | 
|  |  | 
|  | sdhci_disable_card_detection(host); | 
|  |  | 
|  | ret = mmc_suspend_host(host->mmc, state); | 
|  | if (ret) | 
|  | return ret; | 
|  |  | 
|  | free_irq(host->irq, host); | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | EXPORT_SYMBOL_GPL(sdhci_suspend_host); | 
|  |  | 
|  | int sdhci_resume_host(struct sdhci_host *host) | 
|  | { | 
|  | int ret; | 
|  |  | 
|  | if (host->flags & SDHCI_USE_DMA) { | 
|  | if (host->ops->enable_dma) | 
|  | host->ops->enable_dma(host); | 
|  | } | 
|  |  | 
|  | ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, | 
|  | mmc_hostname(host->mmc), host); | 
|  | if (ret) | 
|  | return ret; | 
|  |  | 
|  | sdhci_init(host); | 
|  | mmiowb(); | 
|  |  | 
|  | ret = mmc_resume_host(host->mmc); | 
|  | if (ret) | 
|  | return ret; | 
|  |  | 
|  | sdhci_enable_card_detection(host); | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | EXPORT_SYMBOL_GPL(sdhci_resume_host); | 
|  |  | 
|  | #endif /* CONFIG_PM */ | 
|  |  | 
|  | /*****************************************************************************\ | 
|  | *                                                                           * | 
|  | * Device allocation/registration                                            * | 
|  | *                                                                           * | 
|  | \*****************************************************************************/ | 
|  |  | 
|  | struct sdhci_host *sdhci_alloc_host(struct device *dev, | 
|  | size_t priv_size) | 
|  | { | 
|  | struct mmc_host *mmc; | 
|  | struct sdhci_host *host; | 
|  |  | 
|  | WARN_ON(dev == NULL); | 
|  |  | 
|  | mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); | 
|  | if (!mmc) | 
|  | return ERR_PTR(-ENOMEM); | 
|  |  | 
|  | host = mmc_priv(mmc); | 
|  | host->mmc = mmc; | 
|  |  | 
|  | return host; | 
|  | } | 
|  |  | 
|  | EXPORT_SYMBOL_GPL(sdhci_alloc_host); | 
|  |  | 
|  | int sdhci_add_host(struct sdhci_host *host) | 
|  | { | 
|  | struct mmc_host *mmc; | 
|  | unsigned int caps; | 
|  | int ret; | 
|  |  | 
|  | WARN_ON(host == NULL); | 
|  | if (host == NULL) | 
|  | return -EINVAL; | 
|  |  | 
|  | mmc = host->mmc; | 
|  |  | 
|  | if (debug_quirks) | 
|  | host->quirks = debug_quirks; | 
|  |  | 
|  | sdhci_reset(host, SDHCI_RESET_ALL); | 
|  |  | 
|  | host->version = sdhci_readw(host, SDHCI_HOST_VERSION); | 
|  | host->version = (host->version & SDHCI_SPEC_VER_MASK) | 
|  | >> SDHCI_SPEC_VER_SHIFT; | 
|  | if (host->version > SDHCI_SPEC_200) { | 
|  | printk(KERN_ERR "%s: Unknown controller version (%d). " | 
|  | "You may experience problems.\n", mmc_hostname(mmc), | 
|  | host->version); | 
|  | } | 
|  |  | 
|  | caps = sdhci_readl(host, SDHCI_CAPABILITIES); | 
|  |  | 
|  | if (host->quirks & SDHCI_QUIRK_FORCE_DMA) | 
|  | host->flags |= SDHCI_USE_DMA; | 
|  | else if (!(caps & SDHCI_CAN_DO_DMA)) | 
|  | DBG("Controller doesn't have DMA capability\n"); | 
|  | else | 
|  | host->flags |= SDHCI_USE_DMA; | 
|  |  | 
|  | if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && | 
|  | (host->flags & SDHCI_USE_DMA)) { | 
|  | DBG("Disabling DMA as it is marked broken\n"); | 
|  | host->flags &= ~SDHCI_USE_DMA; | 
|  | } | 
|  |  | 
|  | if (host->flags & SDHCI_USE_DMA) { | 
|  | if ((host->version >= SDHCI_SPEC_200) && | 
|  | (caps & SDHCI_CAN_DO_ADMA2)) | 
|  | host->flags |= SDHCI_USE_ADMA; | 
|  | } | 
|  |  | 
|  | if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && | 
|  | (host->flags & SDHCI_USE_ADMA)) { | 
|  | DBG("Disabling ADMA as it is marked broken\n"); | 
|  | host->flags &= ~SDHCI_USE_ADMA; | 
|  | } | 
|  |  | 
|  | if (host->flags & SDHCI_USE_DMA) { | 
|  | if (host->ops->enable_dma) { | 
|  | if (host->ops->enable_dma(host)) { | 
|  | printk(KERN_WARNING "%s: No suitable DMA " | 
|  | "available. Falling back to PIO.\n", | 
|  | mmc_hostname(mmc)); | 
|  | host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA); | 
|  | } | 
|  | } | 
|  | } | 
|  |  | 
|  | if (host->flags & SDHCI_USE_ADMA) { | 
|  | /* | 
|  | * We need to allocate descriptors for all sg entries | 
|  | * (128) and potentially one alignment transfer for | 
|  | * each of those entries. | 
|  | */ | 
|  | host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL); | 
|  | host->align_buffer = kmalloc(128 * 4, GFP_KERNEL); | 
|  | if (!host->adma_desc || !host->align_buffer) { | 
|  | kfree(host->adma_desc); | 
|  | kfree(host->align_buffer); | 
|  | printk(KERN_WARNING "%s: Unable to allocate ADMA " | 
|  | "buffers. Falling back to standard DMA.\n", | 
|  | mmc_hostname(mmc)); | 
|  | host->flags &= ~SDHCI_USE_ADMA; | 
|  | } | 
|  | } | 
|  |  | 
|  | /* | 
|  | * If we use DMA, then it's up to the caller to set the DMA | 
|  | * mask, but PIO does not need the hw shim so we set a new | 
|  | * mask here in that case. | 
|  | */ | 
|  | if (!(host->flags & SDHCI_USE_DMA)) { | 
|  | host->dma_mask = DMA_BIT_MASK(64); | 
|  | mmc_dev(host->mmc)->dma_mask = &host->dma_mask; | 
|  | } | 
|  |  | 
|  | host->max_clk = | 
|  | (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; | 
|  | host->max_clk *= 1000000; | 
|  | if (host->max_clk == 0) { | 
|  | if (!host->ops->get_max_clock) { | 
|  | printk(KERN_ERR | 
|  | "%s: Hardware doesn't specify base clock " | 
|  | "frequency.\n", mmc_hostname(mmc)); | 
|  | return -ENODEV; | 
|  | } | 
|  | host->max_clk = host->ops->get_max_clock(host); | 
|  | } | 
|  |  | 
|  | host->timeout_clk = | 
|  | (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; | 
|  | if (host->timeout_clk == 0) { | 
|  | if (!host->ops->get_timeout_clock) { | 
|  | printk(KERN_ERR | 
|  | "%s: Hardware doesn't specify timeout clock " | 
|  | "frequency.\n", mmc_hostname(mmc)); | 
|  | return -ENODEV; | 
|  | } | 
|  | host->timeout_clk = host->ops->get_timeout_clock(host); | 
|  | } | 
|  | if (caps & SDHCI_TIMEOUT_CLK_UNIT) | 
|  | host->timeout_clk *= 1000; | 
|  |  | 
|  | /* | 
|  | * Set host parameters. | 
|  | */ | 
|  | mmc->ops = &sdhci_ops; | 
|  | mmc->f_min = host->max_clk / 256; | 
|  | mmc->f_max = host->max_clk; | 
|  | mmc->caps = MMC_CAP_SDIO_IRQ; | 
|  |  | 
|  | if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) | 
|  | mmc->caps |= MMC_CAP_4_BIT_DATA; | 
|  |  | 
|  | if (caps & SDHCI_CAN_DO_HISPD) | 
|  | mmc->caps |= MMC_CAP_SD_HIGHSPEED; | 
|  |  | 
|  | if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) | 
|  | mmc->caps |= MMC_CAP_NEEDS_POLL; | 
|  |  | 
|  | mmc->ocr_avail = 0; | 
|  | if (caps & SDHCI_CAN_VDD_330) | 
|  | mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; | 
|  | if (caps & SDHCI_CAN_VDD_300) | 
|  | mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31; | 
|  | if (caps & SDHCI_CAN_VDD_180) | 
|  | mmc->ocr_avail |= MMC_VDD_165_195; | 
|  |  | 
|  | if (mmc->ocr_avail == 0) { | 
|  | printk(KERN_ERR "%s: Hardware doesn't report any " | 
|  | "support voltages.\n", mmc_hostname(mmc)); | 
|  | return -ENODEV; | 
|  | } | 
|  |  | 
|  | spin_lock_init(&host->lock); | 
|  |  | 
|  | /* | 
|  | * Maximum number of segments. Depends on whether the hardware | 
|  | * can do scatter/gather. | 
|  | */ | 
|  | if (host->flags & SDHCI_USE_ADMA) | 
|  | mmc->max_hw_segs = 128; | 
|  | else if (host->flags & SDHCI_USE_DMA) | 
|  | mmc->max_hw_segs = 1; | 
|  | else /* PIO */ | 
|  | mmc->max_hw_segs = 128; | 
|  | mmc->max_phys_segs = 128; | 
|  |  | 
|  | /* | 
|  | * Maximum request size in bytes. Limited by the DMA boundary | 
|  | * size (512 KiB). | 
|  | */ | 
|  | mmc->max_req_size = 524288; | 
|  |  | 
|  | /* | 
|  | * Maximum segment size. Could be one segment with the maximum number | 
|  | * of bytes. When doing hardware scatter/gather, each entry cannot | 
|  | * be larger than 64 KiB though. | 
|  | */ | 
|  | if (host->flags & SDHCI_USE_ADMA) | 
|  | mmc->max_seg_size = 65536; | 
|  | else | 
|  | mmc->max_seg_size = mmc->max_req_size; | 
|  |  | 
|  | /* | 
|  | * Maximum block size. This varies from controller to controller and | 
|  | * is specified in the capabilities register. | 
|  | */ | 
|  | if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) { | 
|  | mmc->max_blk_size = 2; | 
|  | } else { | 
|  | mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> | 
|  | SDHCI_MAX_BLOCK_SHIFT; | 
|  | if (mmc->max_blk_size >= 3) { | 
|  | printk(KERN_WARNING "%s: Invalid maximum block size, " | 
|  | "assuming 512 bytes\n", mmc_hostname(mmc)); | 
|  | mmc->max_blk_size = 0; | 
|  | } | 
|  | } | 
|  |  | 
|  | mmc->max_blk_size = 512 << mmc->max_blk_size; | 
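|  | /* | 
|  | * The capability field encodes the maximum block size as a power of | 
|  | * two above 512 bytes: 0 -> 512, 1 -> 1024, 2 -> 2048. The shift | 
|  | * above turns that code (or the 2048-byte quirk value) into a byte | 
|  | * count. | 
|  | */ | 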
|  |  | 
|  | /* | 
|  | * Maximum block count. | 
|  | */ | 
|  | mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; | 
|  |  | 
|  | /* | 
|  | * Init tasklets. | 
|  | */ | 
|  | tasklet_init(&host->card_tasklet, | 
|  | sdhci_tasklet_card, (unsigned long)host); | 
|  | tasklet_init(&host->finish_tasklet, | 
|  | sdhci_tasklet_finish, (unsigned long)host); | 
|  |  | 
|  | setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host); | 
|  |  | 
|  | ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, | 
|  | mmc_hostname(mmc), host); | 
|  | if (ret) | 
|  | goto untasklet; | 
|  |  | 
|  | sdhci_init(host); | 
|  |  | 
|  | #ifdef CONFIG_MMC_DEBUG | 
|  | sdhci_dumpregs(host); | 
|  | #endif | 
|  |  | 
|  | #ifdef SDHCI_USE_LEDS_CLASS | 
|  | snprintf(host->led_name, sizeof(host->led_name), | 
|  | "%s::", mmc_hostname(mmc)); | 
|  | host->led.name = host->led_name; | 
|  | host->led.brightness = LED_OFF; | 
|  | host->led.default_trigger = mmc_hostname(mmc); | 
|  | host->led.brightness_set = sdhci_led_control; | 
|  |  | 
|  | ret = led_classdev_register(mmc_dev(mmc), &host->led); | 
|  | if (ret) | 
|  | goto reset; | 
|  | #endif | 
|  |  | 
|  | mmiowb(); | 
|  |  | 
|  | mmc_add_host(mmc); | 
|  |  | 
|  | printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n", | 
|  | mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), | 
|  | (host->flags & SDHCI_USE_ADMA)?"A":"", | 
|  | (host->flags & SDHCI_USE_DMA)?"DMA":"PIO"); | 
|  |  | 
|  | sdhci_enable_card_detection(host); | 
|  |  | 
|  | return 0; | 
|  |  | 
|  | #ifdef SDHCI_USE_LEDS_CLASS | 
|  | reset: | 
|  | sdhci_reset(host, SDHCI_RESET_ALL); | 
|  | free_irq(host->irq, host); | 
|  | #endif | 
|  | untasklet: | 
|  | tasklet_kill(&host->card_tasklet); | 
|  | tasklet_kill(&host->finish_tasklet); | 
|  |  | 
|  | return ret; | 
|  | } | 
|  |  | 
|  | EXPORT_SYMBOL_GPL(sdhci_add_host); | 
|  |  | 
|  | void sdhci_remove_host(struct sdhci_host *host, int dead) | 
|  | { | 
|  | unsigned long flags; | 
|  |  | 
|  | if (dead) { | 
|  | spin_lock_irqsave(&host->lock, flags); | 
|  |  | 
|  | host->flags |= SDHCI_DEVICE_DEAD; | 
|  |  | 
|  | if (host->mrq) { | 
|  | printk(KERN_ERR "%s: Controller removed during " | 
|  | " transfer!\n", mmc_hostname(host->mmc)); | 
|  |  | 
|  | host->mrq->cmd->error = -ENOMEDIUM; | 
|  | tasklet_schedule(&host->finish_tasklet); | 
|  | } | 
|  |  | 
|  | spin_unlock_irqrestore(&host->lock, flags); | 
|  | } | 
|  |  | 
|  | sdhci_disable_card_detection(host); | 
|  |  | 
|  | mmc_remove_host(host->mmc); | 
|  |  | 
|  | #ifdef SDHCI_USE_LEDS_CLASS | 
|  | led_classdev_unregister(&host->led); | 
|  | #endif | 
|  |  | 
|  | if (!dead) | 
|  | sdhci_reset(host, SDHCI_RESET_ALL); | 
|  |  | 
|  | free_irq(host->irq, host); | 
|  |  | 
|  | del_timer_sync(&host->timer); | 
|  |  | 
|  | tasklet_kill(&host->card_tasklet); | 
|  | tasklet_kill(&host->finish_tasklet); | 
|  |  | 
|  | kfree(host->adma_desc); | 
|  | kfree(host->align_buffer); | 
|  |  | 
|  | host->adma_desc = NULL; | 
|  | host->align_buffer = NULL; | 
|  | } | 
|  |  | 
|  | EXPORT_SYMBOL_GPL(sdhci_remove_host); | 
|  |  | 
|  | void sdhci_free_host(struct sdhci_host *host) | 
|  | { | 
|  | mmc_free_host(host->mmc); | 
|  | } | 
|  |  | 
|  | EXPORT_SYMBOL_GPL(sdhci_free_host); | 
|  |  | 
|  | /*****************************************************************************\ | 
|  | *                                                                           * | 
|  | * Driver init/exit                                                          * | 
|  | *                                                                           * | 
|  | \*****************************************************************************/ | 
|  |  | 
|  | static int __init sdhci_drv_init(void) | 
|  | { | 
|  | printk(KERN_INFO DRIVER_NAME | 
|  | ": Secure Digital Host Controller Interface driver\n"); | 
|  | printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); | 
|  |  | 
|  | return 0; | 
|  | } | 
|  |  | 
|  | static void __exit sdhci_drv_exit(void) | 
|  | { | 
|  | } | 
|  |  | 
|  | module_init(sdhci_drv_init); | 
|  | module_exit(sdhci_drv_exit); | 
|  |  | 
|  | module_param(debug_quirks, uint, 0444); | 
|  |  | 
|  | MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); | 
|  | MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); | 
|  | MODULE_LICENSE("GPL"); | 
|  |  | 
|  | MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); |