/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * MSM PCIe controller driver.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/types.h>
#include <asm/mach/pci.h>
#include <mach/gpiomux.h>
#include <mach/hardware.h>
#include <mach/msm_iomap.h>

#include "pcie.h"

/* Root Complex Port vendor/device IDs */
#define PCIE_VENDOR_ID_RCP		0x17cb
#define PCIE_DEVICE_ID_RCP		0x0101

#define PCIE20_PARF_PCS_DEEMPH		0x34
#define PCIE20_PARF_PCS_SWING		0x38
#define PCIE20_PARF_PHY_CTRL		0x40
#define PCIE20_PARF_PHY_REFCLK		0x4C
#define PCIE20_PARF_CONFIG_BITS		0x50

#define PCIE20_ELBI_SYS_CTRL		0x04

#define PCIE20_CAP			0x70
#define PCIE20_CAP_LINKCTRLSTATUS	(PCIE20_CAP + 0x10)

#define PCIE20_COMMAND_STATUS		0x04
#define PCIE20_BUSNUMBERS		0x18
#define PCIE20_MEMORY_BASE_LIMIT	0x20

#define PCIE20_PLR_IATU_VIEWPORT	0x900
#define PCIE20_PLR_IATU_CTRL1		0x904
#define PCIE20_PLR_IATU_CTRL2		0x908
#define PCIE20_PLR_IATU_LBAR		0x90C
#define PCIE20_PLR_IATU_UBAR		0x910
#define PCIE20_PLR_IATU_LAR		0x914
#define PCIE20_PLR_IATU_LTAR		0x918
#define PCIE20_PLR_IATU_UTAR		0x91c

#define PCIE_RESET			(MSM_CLK_CTL_BASE + 0x22dc)
#define PCIE_SFAB_AXI_S5_FCLK_CTL	(MSM_CLK_CTL_BASE + 0x2154)

#define MSM_PCIE_DEV_BAR_ADDR		PCIBIOS_MIN_MEM
#define MSM_PCIE_DEV_CFG_ADDR		0x01000000

#define RD 0
#define WR 1

/* debug mask sys interface */
static int msm_pcie_debug_mask;
module_param_named(debug_mask, msm_pcie_debug_mask,
                   int, S_IRUGO | S_IWUSR | S_IWGRP);

/* resources from device file */
enum msm_pcie_res {
        MSM_PCIE_RES_PARF,
        MSM_PCIE_RES_ELBI,
        MSM_PCIE_RES_PCIE20,
        MSM_PCIE_RES_AXI_BAR,
        MSM_PCIE_RES_AXI_CONF,
        MSM_PCIE_MAX_RES
};

/* msm pcie device data */
static struct msm_pcie_dev_t msm_pcie_dev;

/* regulators */
static struct msm_pcie_vreg_info_t msm_pcie_vreg_info[MSM_PCIE_MAX_VREG] = {
        {NULL, "vp_pcie", 1050000, 1050000, 40900},
        {NULL, "vptx_pcie", 1050000, 1050000, 18200},
        {NULL, "vdd_pcie_vph", 0, 0, 0},
        {NULL, "pcie_ext_3p3v", 0, 0, 0}
};

/* clocks */
static struct msm_pcie_clk_info_t msm_pcie_clk_info[MSM_PCIE_MAX_CLK] = {
        {NULL, "bus_clk"},
        {NULL, "iface_clk"},
        {NULL, "ref_clk"}
};

/* resources */
static struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = {
        {"parf", 0, 0, 0},
        {"elbi", 0, 0, 0},
        {"pcie20", 0, 0, 0},
        {"axi_bar", 0, 0, 0},
        {"axi_conf", 0, 0, 0},
};

int msm_pcie_get_debug_mask(void)
{
        return msm_pcie_debug_mask;
}

static void msm_pcie_write_mask(void __iomem *addr,
                                uint32_t clear_mask, uint32_t set_mask)
{
        uint32_t val;

        val = (readl_relaxed(addr) & ~clear_mask) | set_mask;
        writel_relaxed(val, addr);
        wmb(); /* ensure data is written to hardware register */
}

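/*
 * The dword at PCIE20_CAP_LINKCTRLSTATUS holds Link Control in its lower
 * half and Link Status in its upper half; BIT(29) corresponds to the Data
 * Link Layer Link Active bit of Link Status, so a non-zero return means
 * the link to the endpoint is up.
 */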
static int msm_pcie_is_link_up(void)
{
        return readl_relaxed(msm_pcie_dev.pcie20 + PCIE20_CAP_LINKCTRLSTATUS) &
                                BIT(29);
}

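/*
 * Common helper for config space reads and writes. Accesses are performed
 * as 32-bit reads of the underlying register file; word_offset, byte_offset
 * and the byte-lane mask below reduce that to the 1-, 2- or 4-byte access
 * the PCI core asked for, with writes done as read-modify-write.
 */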
static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
                                     int where, int size, u32 *val)
{
        uint32_t word_offset, byte_offset, mask;
        uint32_t rd_val, wr_val;
        struct msm_pcie_dev_t *dev = &msm_pcie_dev;
        void __iomem *config_base;

        /*
         * Only buses 0 and 1 are supported: the RC port is on bus 0 and the
         * EP is on bus 1. For the downstream bus (1), make sure the link is
         * up before touching config space.
         */
        if ((bus->number > 1) || (devfn != 0)) {
                PCIE_DBG("invalid %s - bus %d devfn %d\n",
                         (oper == RD) ? "rd" : "wr", bus->number, devfn);
                *val = ~0;
                return PCIBIOS_DEVICE_NOT_FOUND;
        } else if ((bus->number != 0) && !msm_pcie_is_link_up()) {
                PCIE_DBG("%s fail, link down - bus %d devfn %d\n",
                         (oper == RD) ? "rd" : "wr", bus->number, devfn);
                *val = ~0;
                return PCIBIOS_DEVICE_NOT_FOUND;
        }

        word_offset = where & ~0x3;
        byte_offset = where & 0x3;
        /* use an unsigned constant so the shift yields a clean byte-lane mask */
        mask = (~0U >> (8 * (4 - size))) << (8 * byte_offset);

        config_base = (bus->number == 0) ? dev->pcie20 : dev->axi_conf;
        rd_val = readl_relaxed(config_base + word_offset);

        if (oper == RD) {
                *val = ((rd_val & mask) >> (8 * byte_offset));

                PCIE_DBG("%d:0x%02x + 0x%04x[%d] -> 0x%08x; rd 0x%08x\n",
                         bus->number, devfn, where, size, *val, rd_val);
        } else {
                wr_val = (rd_val & ~mask) |
                         ((*val << (8 * byte_offset)) & mask);
                writel_relaxed(wr_val, config_base + word_offset);
                wmb(); /* ensure config data is written to hardware register */

                PCIE_DBG("%d:0x%02x + 0x%04x[%d] <- 0x%08x; rd 0x%08x val 0x%08x\n",
                         bus->number, devfn, where, size, wr_val, rd_val, *val);
        }

        return 0;
}

static int msm_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
                            int size, u32 *val)
{
        return msm_pcie_oper_conf(bus, devfn, RD, where, size, val);
}

static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
                            int where, int size, u32 val)
{
        return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
}

static struct pci_ops msm_pcie_ops = {
        .read = msm_pcie_rd_conf,
        .write = msm_pcie_wr_conf,
};

static int __init msm_pcie_gpio_init(void)
{
        int rc, i;
        struct msm_pcie_gpio_info_t *info;

        for (i = 0; i < MSM_PCIE_MAX_GPIO; i++) {
                info = &msm_pcie_dev.gpio[i];

                rc = gpio_request(info->num, info->name);
                if (rc) {
                        pr_err("can't get gpio %s; %d\n", info->name, rc);
                        break;
                }

                rc = gpio_direction_output(info->num, 0);
                if (rc) {
                        pr_err("can't set gpio direction %s; %d\n",
                               info->name, rc);
                        gpio_free(info->num);
                        break;
                }
        }

        if (rc)
                while (i--)
                        gpio_free(msm_pcie_dev.gpio[i].num);

        return rc;
}

static void msm_pcie_gpio_deinit(void)
{
        int i;

        for (i = 0; i < MSM_PCIE_MAX_GPIO; i++)
                gpio_free(msm_pcie_dev.gpio[i].num);
}

static int __init msm_pcie_vreg_init(struct device *dev)
{
        int i, rc = 0;
        struct regulator *vreg;
        struct msm_pcie_vreg_info_t *info;

        for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
                info = &msm_pcie_dev.vreg[i];

                vreg = regulator_get(dev, info->name);
                if (!vreg || IS_ERR(vreg)) {
                        rc = (PTR_ERR(vreg)) ? PTR_ERR(vreg) : -ENODEV;
                        pr_err("can't get %s; %d\n", info->name, rc);
                        break;
                }

                if (info->max_v) {
                        rc = regulator_set_voltage(vreg,
                                                   info->min_v, info->max_v);
                        if (rc) {
                                pr_err("can't set voltage %s; %d\n",
                                       info->name, rc);
                                regulator_put(vreg);
                                break;
                        }
                }

                if (info->opt_mode) {
                        rc = regulator_set_optimum_mode(vreg, info->opt_mode);
                        if (rc < 0) {
                                pr_err("can't set mode %s; %d\n",
                                       info->name, rc);
                                regulator_put(vreg);
                                break;
                        }
                }

                rc = regulator_enable(vreg);
                if (rc) {
                        pr_err("can't enable %s, %d\n", info->name, rc);
                        regulator_put(vreg);
                        break;
                }
                info->hdl = vreg;
        }

        if (rc)
                while (i--) {
                        regulator_disable(msm_pcie_dev.vreg[i].hdl);
                        regulator_put(msm_pcie_dev.vreg[i].hdl);
                        msm_pcie_dev.vreg[i].hdl = NULL;
                }

        return rc;
}

static void msm_pcie_vreg_deinit(void)
{
        int i;

        for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
                regulator_disable(msm_pcie_dev.vreg[i].hdl);
                regulator_put(msm_pcie_dev.vreg[i].hdl);
                msm_pcie_dev.vreg[i].hdl = NULL;
        }
}

static int __init msm_pcie_clk_init(struct device *dev)
{
        int i, rc = 0;
        struct clk *clk_hdl;
        struct msm_pcie_clk_info_t *info;

        for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
                info = &msm_pcie_dev.clk[i];

                clk_hdl = clk_get(dev, info->name);
                if (!clk_hdl || IS_ERR(clk_hdl)) {
                        rc = (PTR_ERR(clk_hdl)) ? PTR_ERR(clk_hdl) : -ENODEV;
                        pr_err("can't get clk %s; %d\n", info->name, rc);
                        break;
                }
                clk_prepare_enable(clk_hdl);
                info->hdl = clk_hdl;
        }

        if (rc)
                while (i--) {
                        clk_disable_unprepare(msm_pcie_dev.clk[i].hdl);
                        clk_put(msm_pcie_dev.clk[i].hdl);
                        msm_pcie_dev.clk[i].hdl = NULL;
                }

        return rc;
}

static void msm_pcie_clk_deinit(void)
{
        int i;

        for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
                clk_disable_unprepare(msm_pcie_dev.clk[i].hdl);
                clk_put(msm_pcie_dev.clk[i].hdl);
                msm_pcie_dev.clk[i].hdl = NULL;
        }
}

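/*
 * Program the controller's address translation unit (iATU): region 0 maps
 * the AXI config window onto the endpoint's configuration space, and
 * region 2 maps the AXI BAR window onto the endpoint's memory (BAR) space.
 */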
static void __init msm_pcie_config_controller(void)
{
        struct msm_pcie_dev_t *dev = &msm_pcie_dev;
        struct msm_pcie_res_info_t *axi_bar = &dev->res[MSM_PCIE_RES_AXI_BAR];
        struct msm_pcie_res_info_t *axi_conf = &dev->res[MSM_PCIE_RES_AXI_CONF];

        /*
         * program and enable address translation region 0 (device config
         * address space); region type config;
         * axi config address range to device config address range
         */
        writel_relaxed(0, dev->pcie20 + PCIE20_PLR_IATU_VIEWPORT);
        /* ensure that hardware locks the region before programming it */
        wmb();

        writel_relaxed(4, dev->pcie20 + PCIE20_PLR_IATU_CTRL1);
        writel_relaxed(BIT(31), dev->pcie20 + PCIE20_PLR_IATU_CTRL2);
        writel_relaxed(axi_conf->start, dev->pcie20 + PCIE20_PLR_IATU_LBAR);
        writel_relaxed(0, dev->pcie20 + PCIE20_PLR_IATU_UBAR);
        writel_relaxed(axi_conf->end, dev->pcie20 + PCIE20_PLR_IATU_LAR);
        writel_relaxed(MSM_PCIE_DEV_CFG_ADDR,
                       dev->pcie20 + PCIE20_PLR_IATU_LTAR);
        writel_relaxed(0, dev->pcie20 + PCIE20_PLR_IATU_UTAR);
        /* ensure that hardware registers the configuration */
        wmb();

        /*
         * program and enable address translation region 2 (device resource
         * address space); region type memory;
         * axi device bar address range to device bar address range
         */
        writel_relaxed(2, dev->pcie20 + PCIE20_PLR_IATU_VIEWPORT);
        /* ensure that hardware locks the region before programming it */
        wmb();

        writel_relaxed(0, dev->pcie20 + PCIE20_PLR_IATU_CTRL1);
        writel_relaxed(BIT(31), dev->pcie20 + PCIE20_PLR_IATU_CTRL2);
        writel_relaxed(axi_bar->start, dev->pcie20 + PCIE20_PLR_IATU_LBAR);
        writel_relaxed(0, dev->pcie20 + PCIE20_PLR_IATU_UBAR);
        writel_relaxed(axi_bar->end, dev->pcie20 + PCIE20_PLR_IATU_LAR);
        writel_relaxed(MSM_PCIE_DEV_BAR_ADDR,
                       dev->pcie20 + PCIE20_PLR_IATU_LTAR);
        writel_relaxed(0, dev->pcie20 + PCIE20_PLR_IATU_UTAR);
        /* ensure that hardware registers the configuration */
        wmb();
}

static int __init msm_pcie_get_resources(struct platform_device *pdev)
{
        int i, rc = 0;
        struct resource *res;
        struct msm_pcie_res_info_t *info;
        struct msm_pcie_dev_t *dev = &msm_pcie_dev;

        for (i = 0; i < MSM_PCIE_MAX_RES; i++) {
                info = &dev->res[i];

                res = platform_get_resource_byname(pdev,
                                                   IORESOURCE_MEM, info->name);
                if (!res) {
                        pr_err("can't get %s resource\n", info->name);
                        rc = -ENOMEM;
                        break;
                }

                info->base = ioremap(res->start, resource_size(res));
                if (!info->base) {
                        pr_err("can't remap %s\n", info->name);
                        rc = -ENOMEM;
                        break;
                }

                info->start = res->start;
                info->end = res->end;
        }

        if (rc) {
                while (i--) {
                        iounmap(dev->res[i].base);
                        dev->res[i].base = NULL;
                }
        } else {
                dev->parf = dev->res[MSM_PCIE_RES_PARF].base;
                dev->elbi = dev->res[MSM_PCIE_RES_ELBI].base;
                dev->pcie20 = dev->res[MSM_PCIE_RES_PCIE20].base;
                dev->axi_conf = dev->res[MSM_PCIE_RES_AXI_CONF].base;
        }

        return rc;
}

static void msm_pcie_release_resources(void)
{
        int i;

        for (i = 0; i < MSM_PCIE_MAX_RES; i++) {
                iounmap(msm_pcie_dev.res[i].base);
                msm_pcie_dev.res[i].base = NULL;
        }

        msm_pcie_dev.parf = NULL;
        msm_pcie_dev.elbi = NULL;
        msm_pcie_dev.pcie20 = NULL;
        msm_pcie_dev.axi_conf = NULL;
}

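/*
 * Bring up controller 0: power the core, enable clocks, take the PHY and
 * core out of reset, train the link, then program the iATU and hook up
 * interrupts. Per the ARM hw_pci convention this returns 1 on success and
 * 0 on failure.
 */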
static int __init msm_pcie_setup(int nr, struct pci_sys_data *sys)
{
        int rc;
        struct msm_pcie_dev_t *dev = &msm_pcie_dev;
        uint32_t val;

        PCIE_DBG("bus %d\n", nr);
        if (nr != 0)
                return 0;

        /* assert PCIe link reset to keep the EP in reset */
        gpio_set_value_cansleep(dev->gpio[MSM_PCIE_GPIO_RST_N].num,
                                dev->gpio[MSM_PCIE_GPIO_RST_N].on);

        /* enable power */
        rc = msm_pcie_vreg_init(&dev->pdev->dev);
        if (rc)
                goto out;

        /* assert PCIe PARF reset while powering the core */
        msm_pcie_write_mask(PCIE_RESET, 0, BIT(2));

        /* enable clocks */
        rc = msm_pcie_clk_init(&dev->pdev->dev);
        if (rc)
                goto clk_fail;

        /* enable pcie power; wait 3ms for clock to stabilize */
        gpio_set_value_cansleep(dev->gpio[MSM_PCIE_GPIO_PWR_EN].num,
                                dev->gpio[MSM_PCIE_GPIO_PWR_EN].on);
        usleep(3000);

        /*
         * de-assert PCIe PARF reset;
         * wait 1us before accessing PARF registers
         */
        msm_pcie_write_mask(PCIE_RESET, BIT(2), 0);
        udelay(1);

        /* enable PCIe clocks and resets */
        msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0);

        /* PARF programming */
        writel_relaxed(0x282828, dev->parf + PCIE20_PARF_PCS_DEEMPH);
        writel_relaxed(0x7F7F, dev->parf + PCIE20_PARF_PCS_SWING);
        writel_relaxed((4<<24), dev->parf + PCIE20_PARF_CONFIG_BITS);
        /* ensure that hardware registers the PARF configuration */
        wmb();

        /* enable reference clock */
        msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_REFCLK, 0, BIT(16));

        /* enable access to PCIe slave port on system fabric */
        writel_relaxed(BIT(4), PCIE_SFAB_AXI_S5_FCLK_CTL);
        /* ensure that access is enabled before proceeding */
        wmb();

        /* de-assert PCIe PHY, Core, POR and AXI clk domain resets */
        msm_pcie_write_mask(PCIE_RESET, BIT(5), 0);
        msm_pcie_write_mask(PCIE_RESET, BIT(4), 0);
        msm_pcie_write_mask(PCIE_RESET, BIT(3), 0);
        msm_pcie_write_mask(PCIE_RESET, BIT(0), 0);

        /* wait 150us for clock acquisition */
        udelay(150);

        /* de-assert PCIe link reset to bring the EP out of reset */
        gpio_set_value_cansleep(dev->gpio[MSM_PCIE_GPIO_RST_N].num,
                                !dev->gpio[MSM_PCIE_GPIO_RST_N].on);

        /* enable link training */
        msm_pcie_write_mask(dev->elbi + PCIE20_ELBI_SYS_CTRL, 0, BIT(0));

        /* poll for up to 100ms for the link to come up */
        rc = readl_poll_timeout(
                (msm_pcie_dev.pcie20 + PCIE20_CAP_LINKCTRLSTATUS),
                val, (val & BIT(29)), 10000, 100000);
        if (rc) {
                pr_err("link initialization failed\n");
                goto link_fail;
        } else
                pr_info("link initialized\n");

        msm_pcie_config_controller();
        rc = msm_pcie_irq_init(dev);
        if (!rc)
                goto out;

link_fail:
        msm_pcie_clk_deinit();
clk_fail:
        msm_pcie_vreg_deinit();
out:
        return (rc) ? 0 : 1;
}

static struct pci_bus __init *msm_pcie_scan_bus(int nr,
                                                struct pci_sys_data *sys)
{
        struct pci_bus *bus = NULL;

        PCIE_DBG("bus %d\n", nr);
        if (nr == 0)
                bus = pci_scan_bus(sys->busnr, &msm_pcie_ops, sys);

        return bus;
}

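/*
 * Map legacy INTx: pins 1-4 (INTA-INTD) are routed to consecutive
 * interrupt numbers starting at PCIE20_INTA.
 */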
static int __init msm_pcie_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
        PCIE_DBG("slot %d pin %d\n", slot, pin);
        return (pin <= 4) ? (PCIE20_INTA + pin - 1) : 0;
}

static struct hw_pci msm_pci __initdata = {
        .nr_controllers = 1,
        .swizzle = pci_std_swizzle,
        .setup = msm_pcie_setup,
        .scan = msm_pcie_scan_bus,
        .map_irq = msm_pcie_map_irq,
};

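/*
 * Claim controller resources, request the GPIOs, then hand enumeration
 * off to the ARM PCI core via pci_common_init(), which calls back into
 * msm_pcie_setup()/msm_pcie_scan_bus()/msm_pcie_map_irq() above.
 */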
static int __init msm_pcie_probe(struct platform_device *pdev)
{
        const struct msm_pcie_platform *pdata;
        int rc;

        PCIE_DBG("\n");

        msm_pcie_dev.pdev = pdev;
        pdata = pdev->dev.platform_data;
        msm_pcie_dev.gpio = pdata->gpio;
        msm_pcie_dev.vreg = msm_pcie_vreg_info;
        msm_pcie_dev.clk = msm_pcie_clk_info;
        msm_pcie_dev.res = msm_pcie_res_info;

        rc = msm_pcie_get_resources(msm_pcie_dev.pdev);
        if (rc)
                return rc;

        rc = msm_pcie_gpio_init();
        if (rc) {
                msm_pcie_release_resources();
                return rc;
        }

        /* kick start ARM PCI configuration framework */
        pci_common_init(&msm_pci);
        return 0;
}

static int __exit msm_pcie_remove(struct platform_device *pdev)
{
        PCIE_DBG("\n");

        msm_pcie_irq_deinit(&msm_pcie_dev);
        msm_pcie_vreg_deinit();
        msm_pcie_clk_deinit();
        msm_pcie_gpio_deinit();
        msm_pcie_release_resources();

        msm_pcie_dev.pdev = NULL;
        msm_pcie_dev.vreg = NULL;
        msm_pcie_dev.clk = NULL;
        msm_pcie_dev.gpio = NULL;
        return 0;
}

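/*
 * No .probe is set here: msm_pcie_probe() is __init, so it is passed to
 * platform_driver_probe() from msm_pcie_init() instead of living in this
 * structure.
 */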
static struct platform_driver msm_pcie_driver = {
        .remove = __exit_p(msm_pcie_remove),
        .driver = {
                .name = "msm_pcie",
                .owner = THIS_MODULE,
        },
};

static int __init msm_pcie_init(void)
{
        PCIE_DBG("\n");
        return platform_driver_probe(&msm_pcie_driver, msm_pcie_probe);
}
subsys_initcall(msm_pcie_init);

/* The RC does not report the right class; set it to PCI_CLASS_BRIDGE_PCI */
static void __devinit msm_pcie_fixup_header(struct pci_dev *dev)
{
        PCIE_DBG("hdr_type %d\n", dev->hdr_type);
        if (dev->hdr_type == 1)
                dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
}
DECLARE_PCI_FIXUP_HEADER(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
                         msm_pcie_fixup_header);

/*
 * The actual physical (BAR) addresses of the device resources start at
 * 0x10xxxxxx, while the system AXI addresses for those resources start at
 * 0x08xxxxxx; correct the device resource structures here. The address
 * translation unit handles the required translations.
 */
static void __devinit msm_pcie_fixup_final(struct pci_dev *dev)
{
        int i;

        PCIE_DBG("vendor 0x%x 0x%x\n", dev->vendor, dev->device);
        for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                if (dev->resource[i].start & 0xFF000000) {
                        dev->resource[i].start &= 0x00FFFFFF;
                        dev->resource[i].start |= 0x08000000;
                        dev->resource[i].end &= 0x00FFFFFF;
                        dev->resource[i].end |= 0x08000000;
                }
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, msm_pcie_fixup_final);