/*******************************************************************************
  This is the driver for the MAC 10/100 on-chip Ethernet controller
  currently tested on all the ST boards based on STb7109 and stx7200 SoCs.

  DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
  this code.

  This contains the functions to handle the dma and descriptors.

  Copyright (C) 2007-2009 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include "dwmac100.h"
#include "dwmac_dma.h"

static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
			     u32 dma_rx)
{
	u32 value = readl(ioaddr + DMA_BUS_MODE);

	/* DMA SW reset */
	value |= DMA_BUS_MODE_SFT_RESET;
	writel(value, ioaddr + DMA_BUS_MODE);
	do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
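	/* The loop above simply polls until the hardware clears the
	 * SFT_RESET bit; there is no timeout, so a wedged bus would
	 * spin here forever. */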

	/* Enable Application Access by writing to DMA CSR0 */
	writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
	       ioaddr + DMA_BUS_MODE);

	/* Mask interrupts by writing to CSR7 */
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);

	/* The base address of the RX/TX descriptor lists must be written into
	 * DMA CSR3 and CSR4, respectively. */
	writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
	writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);

	return 0;
}

/* The Store-and-Forward capability is not used at all.
 * The transmit threshold can be programmed by
 * setting the TTC bits in the DMA control register. */
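/* Only the transmit side is programmed below: the rxmode argument is
 * accepted to match the dma_mode callback signature, but no receive
 * threshold is configured for this core. */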
static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
					int rxmode)
{
	u32 csr6 = readl(ioaddr + DMA_CONTROL);

	if (txmode <= 32)
		csr6 |= DMA_CONTROL_TTC_32;
	else if (txmode <= 64)
		csr6 |= DMA_CONTROL_TTC_64;
	else
		csr6 |= DMA_CONTROL_TTC_128;

	writel(csr6, ioaddr + DMA_CONTROL);
}

static void dwmac100_dump_dma_regs(unsigned long ioaddr)
{
	int i;

	DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n");
	for (i = 0; i < 9; i++)
		pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
			 (DMA_BUS_MODE + i * 4),
			 readl(ioaddr + DMA_BUS_MODE + i * 4));
	DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
	    DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
	DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
	    DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
}

/* The DMA controller has two counters to track the number of
 * frames missed on receive. */
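/* As decoded below, CSR8 packs a 16-bit missed-frame counter together with
 * an 11-bit FIFO overflow counter (starting at bit 17); the dedicated
 * overflow flags report when either field has rolled over, in which case
 * the statistics are credited with the counter's full range (0xffff and
 * 0x800 respectively). */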
static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
					unsigned long ioaddr)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);

	if (unlikely(csr8)) {
		if (csr8 & DMA_MISSED_FRAME_OVE) {
			stats->rx_over_errors += 0x800;
			x->rx_overflow_cntr += 0x800;
		} else {
			unsigned int ove_cntr;
			ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
			stats->rx_over_errors += ove_cntr;
			x->rx_overflow_cntr += ove_cntr;
		}

		if (csr8 & DMA_MISSED_FRAME_OVE_M) {
			stats->rx_missed_errors += 0xffff;
			x->rx_missed_cntr += 0xffff;
		} else {
			unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
			stats->rx_missed_errors += miss_f;
			x->rx_missed_cntr += miss_f;
		}
	}
}

static int dwmac100_get_tx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p, unsigned long ioaddr)
{
	int ret = 0;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.tx.error_summary)) {
		if (unlikely(p->des01.tx.underflow_error)) {
			x->tx_underflow++;
			stats->tx_fifo_errors++;
		}
		if (unlikely(p->des01.tx.no_carrier)) {
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(p->des01.tx.loss_carrier)) {
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely((p->des01.tx.excessive_deferral) ||
			     (p->des01.tx.excessive_collisions) ||
			     (p->des01.tx.late_collision)))
			stats->collisions += p->des01.tx.collision_count;
		ret = -1;
	}
	if (unlikely(p->des01.tx.heartbeat_fail)) {
		x->tx_heartbeat++;
		stats->tx_heartbeat_errors++;
		ret = -1;
	}
	if (unlikely(p->des01.tx.deferred))
		x->tx_deferred++;

	return ret;
}

static int dwmac100_get_tx_len(struct dma_desc *p)
{
	return p->des01.tx.buffer1_size;
}

/* This function checks each incoming frame for errors and, if required,
 * updates the multicast statistics.
 * On success it returns csum_none because the device is not able to
 * compute the checksum in HW. */
static int dwmac100_get_rx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p)
{
	int ret = csum_none;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.rx.last_descriptor == 0)) {
		pr_warning("dwmac100 Error: Oversized Ethernet "
			   "frame spanned multiple buffers\n");
		stats->rx_length_errors++;
		return discard_frame;
	}

	if (unlikely(p->des01.rx.error_summary)) {
		if (unlikely(p->des01.rx.descriptor_error))
			x->rx_desc++;
		if (unlikely(p->des01.rx.partial_frame_error))
			x->rx_partial++;
		if (unlikely(p->des01.rx.run_frame))
			x->rx_runt++;
		if (unlikely(p->des01.rx.frame_too_long))
			x->rx_toolong++;
		if (unlikely(p->des01.rx.collision)) {
			x->rx_collision++;
			stats->collisions++;
		}
		if (unlikely(p->des01.rx.crc_error)) {
			x->rx_crc++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}
	if (unlikely(p->des01.rx.dribbling))
		ret = discard_frame;

	if (unlikely(p->des01.rx.length_error)) {
		x->rx_length++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.rx.mii_error)) {
		x->rx_mii++;
		ret = discard_frame;
	}
	if (p->des01.rx.multicast_frame) {
		x->rx_multicast++;
		stats->multicast++;
	}
	return ret;
}

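/* The descriptors are used in ring mode: every RX descriptor is handed to
 * the DMA (own = 1) with a 2 KiB buffer, the last one has end_ring set so
 * the engine wraps back to the base address programmed in CSR3, and
 * disable_rx_ic can be used to suppress the per-frame receive interrupt. */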
static void dwmac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
				  int disable_rx_ic)
{
	int i;

	for (i = 0; i < ring_size; i++) {
		p->des01.rx.own = 1;
		p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
		if (i == ring_size - 1)
			p->des01.rx.end_ring = 1;
		if (disable_rx_ic)
			p->des01.rx.disable_ic = 1;
		p++;
	}
}

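/* TX descriptors start out owned by the CPU (own = 0); ownership is passed
 * to the DMA per descriptor at transmit time via dwmac100_set_tx_owner(). */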
static void dwmac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
	int i;

	for (i = 0; i < ring_size; i++) {
		p->des01.tx.own = 0;
		if (i == ring_size - 1)
			p->des01.tx.end_ring = 1;
		p++;
	}
}

static int dwmac100_get_tx_owner(struct dma_desc *p)
{
	return p->des01.tx.own;
}

static int dwmac100_get_rx_owner(struct dma_desc *p)
{
	return p->des01.rx.own;
}

static void dwmac100_set_tx_owner(struct dma_desc *p)
{
	p->des01.tx.own = 1;
}

static void dwmac100_set_rx_owner(struct dma_desc *p)
{
	p->des01.rx.own = 1;
}

static int dwmac100_get_tx_ls(struct dma_desc *p)
{
	return p->des01.tx.last_segment;
}

static void dwmac100_release_tx_desc(struct dma_desc *p)
{
	int ter = p->des01.tx.end_ring;

	/* clean the fields used within the xmit path */
	p->des01.tx.first_segment = 0;
	p->des01.tx.last_segment = 0;
	p->des01.tx.buffer1_size = 0;

	/* clean the reported status */
	p->des01.tx.error_summary = 0;
	p->des01.tx.underflow_error = 0;
	p->des01.tx.no_carrier = 0;
	p->des01.tx.loss_carrier = 0;
	p->des01.tx.excessive_deferral = 0;
	p->des01.tx.excessive_collisions = 0;
	p->des01.tx.late_collision = 0;
	p->des01.tx.heartbeat_fail = 0;
	p->des01.tx.deferred = 0;

	/* restore the ring termination field */
	p->des01.tx.end_ring = ter;
}

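/* csum_flag is unused here: this 10/100 core cannot insert checksums in
 * hardware (see dwmac100_get_rx_status above), so only the first-segment
 * flag and the buffer length are programmed. */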
static void dwmac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				     int csum_flag)
{
	p->des01.tx.first_segment = is_fs;
	p->des01.tx.buffer1_size = len;
}

static void dwmac100_clear_tx_ic(struct dma_desc *p)
{
	p->des01.tx.interrupt = 0;
}

static void dwmac100_close_tx_desc(struct dma_desc *p)
{
	p->des01.tx.last_segment = 1;
	p->des01.tx.interrupt = 1;
}

static int dwmac100_get_rx_frame_len(struct dma_desc *p)
{
	return p->des01.rx.frame_length;
}

struct stmmac_dma_ops dwmac100_dma_ops = {
	.init = dwmac100_dma_init,
	.dump_regs = dwmac100_dump_dma_regs,
	.dma_mode = dwmac100_dma_operation_mode,
	.dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
	.enable_dma_transmission = dwmac_enable_dma_transmission,
	.enable_dma_irq = dwmac_enable_dma_irq,
	.disable_dma_irq = dwmac_disable_dma_irq,
	.start_tx = dwmac_dma_start_tx,
	.stop_tx = dwmac_dma_stop_tx,
	.start_rx = dwmac_dma_start_rx,
	.stop_rx = dwmac_dma_stop_rx,
	.dma_interrupt = dwmac_dma_interrupt,
};

struct stmmac_desc_ops dwmac100_desc_ops = {
	.tx_status = dwmac100_get_tx_status,
	.rx_status = dwmac100_get_rx_status,
	.get_tx_len = dwmac100_get_tx_len,
	.init_rx_desc = dwmac100_init_rx_desc,
	.init_tx_desc = dwmac100_init_tx_desc,
	.get_tx_owner = dwmac100_get_tx_owner,
	.get_rx_owner = dwmac100_get_rx_owner,
	.release_tx_desc = dwmac100_release_tx_desc,
	.prepare_tx_desc = dwmac100_prepare_tx_desc,
	.clear_tx_ic = dwmac100_clear_tx_ic,
	.close_tx_desc = dwmac100_close_tx_desc,
	.get_tx_ls = dwmac100_get_tx_ls,
	.set_tx_owner = dwmac100_set_tx_owner,
	.set_rx_owner = dwmac100_set_rx_owner,
	.get_rx_frame_len = dwmac100_get_rx_frame_len,
};
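
/* These two tables are how the rest of the stmmac driver reaches the
 * MAC 10/100 specific DMA and descriptor handling: the generic core calls
 * through the stmmac_dma_ops/stmmac_desc_ops hooks (presumably installed by
 * the dwmac100 setup code) rather than touching the registers directly, so
 * the same core can also drive the GMAC variant. */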