/*
 * Synopsys Designware PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/types.h>

#include "pcie-designware.h"

/* Synopsys-specific PCIe configuration registers */
#define PCIE_PORT_LINK_CONTROL		0x710
#define PORT_LINK_MODE_MASK		(0x3f << 16)
#define PORT_LINK_MODE_1_LANES		(0x1 << 16)
#define PORT_LINK_MODE_2_LANES		(0x3 << 16)
#define PORT_LINK_MODE_4_LANES		(0x7 << 16)

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)
#define PORT_LOGIC_LINK_WIDTH_MASK	(0x1ff << 8)
#define PORT_LOGIC_LINK_WIDTH_1_LANES	(0x1 << 8)
#define PORT_LOGIC_LINK_WIDTH_2_LANES	(0x2 << 8)
#define PORT_LOGIC_LINK_WIDTH_4_LANES	(0x4 << 8)

#define PCIE_MSI_ADDR_LO		0x820
#define PCIE_MSI_ADDR_HI		0x824
#define PCIE_MSI_INTR0_ENABLE		0x828
#define PCIE_MSI_INTR0_MASK		0x82C
#define PCIE_MSI_INTR0_STATUS		0x830

#define PCIE_ATU_VIEWPORT		0x900
#define PCIE_ATU_REGION_INBOUND		(0x1 << 31)
#define PCIE_ATU_REGION_OUTBOUND	(0x0 << 31)
#define PCIE_ATU_REGION_INDEX1		(0x1 << 0)
#define PCIE_ATU_REGION_INDEX0		(0x0 << 0)
#define PCIE_ATU_CR1			0x904
#define PCIE_ATU_TYPE_MEM		(0x0 << 0)
#define PCIE_ATU_TYPE_IO		(0x2 << 0)
#define PCIE_ATU_TYPE_CFG0		(0x4 << 0)
#define PCIE_ATU_TYPE_CFG1		(0x5 << 0)
#define PCIE_ATU_CR2			0x908
#define PCIE_ATU_ENABLE			(0x1 << 31)
#define PCIE_ATU_BAR_MODE_ENABLE	(0x1 << 30)
#define PCIE_ATU_LOWER_BASE		0x90C
#define PCIE_ATU_UPPER_BASE		0x910
#define PCIE_ATU_LIMIT			0x914
#define PCIE_ATU_LOWER_TARGET		0x918
#define PCIE_ATU_BUS(x)			(((x) & 0xff) << 24)
#define PCIE_ATU_DEV(x)			(((x) & 0x1f) << 19)
#define PCIE_ATU_FUNC(x)		(((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET		0x91C

static struct hw_pci dw_pci;

unsigned long global_io_offset;

static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}

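/*
 * Generic helpers for sub-dword configuration accesses: 'addr' must be the
 * dword-aligned address of the register and 'where' carries the byte offset
 * within that dword.  Reads fetch the whole dword and mask out the requested
 * bytes; writes use byte/halfword stores at the proper offset.
 */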
int cfg_read(void __iomem *addr, int where, int size, u32 *val)
{
	*val = readl(addr);

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;
	else if (size != 4)
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}

int cfg_write(void __iomem *addr, int where, int size, u32 val)
{
	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr + (where & 2));
	else if (size == 1)
		writeb(val, addr + (where & 3));
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}

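/*
 * DBI register accessors for the root complex.  A platform driver can
 * override these through pp->ops when its application registers need a
 * special access sequence; otherwise plain readl()/writel() on the DBI
 * address is used.
 */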
static inline void dw_pcie_readl_rc(struct pcie_port *pp,
				void __iomem *dbi_addr, u32 *val)
{
	if (pp->ops->readl_rc)
		pp->ops->readl_rc(pp, dbi_addr, val);
	else
		*val = readl(dbi_addr);
}

static inline void dw_pcie_writel_rc(struct pcie_port *pp,
				u32 val, void __iomem *dbi_addr)
{
	if (pp->ops->writel_rc)
		pp->ops->writel_rc(pp, val, dbi_addr);
	else
		writel(val, dbi_addr);
}

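/*
 * Accessors for the root complex's own configuration space, which is
 * exposed through the DBI block.  As above, pp->ops may override the
 * default cfg_read()/cfg_write() based implementation.
 */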
int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			u32 *val)
{
	int ret;

	if (pp->ops->rd_own_conf)
		ret = pp->ops->rd_own_conf(pp, where, size, val);
	else
		ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);

	return ret;
}

int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			u32 val)
{
	int ret;

	if (pp->ops->wr_own_conf)
		ret = pp->ops->wr_own_conf(pp, where, size, val);
	else
		ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size,
				val);

	return ret;
}

int dw_pcie_link_up(struct pcie_port *pp)
{
	if (pp->ops->link_up)
		return pp->ops->link_up(pp);
	else
		return 0;
}

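/*
 * dw_pcie_host_init() - common host bring-up shared by the platform drivers.
 *
 * The device tree "ranges" property supplies the I/O and memory windows plus
 * a typeless range that is split in half for CFG0/CFG1 accesses.  The DBI
 * space is ioremapped here unless the platform driver mapped it already, and
 * the mandatory "num-lanes" property is read.  After the optional host_init()
 * hook, the RC's BAR0, class code and speed-change bit are programmed and the
 * bridge is registered with the ARM PCI core via pci_common_init().
 */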
int __init dw_pcie_host_init(struct pcie_port *pp)
{
	struct device_node *np = pp->dev->of_node;
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	u32 val;

	if (of_pci_range_parser_init(&parser, np)) {
		dev_err(pp->dev, "missing ranges property\n");
		return -EINVAL;
	}

	/* Get the I/O and memory ranges from DT */
	for_each_of_pci_range(&parser, &range) {
		unsigned long restype = range.flags & IORESOURCE_TYPE_BITS;
		if (restype == IORESOURCE_IO) {
			of_pci_range_to_resource(&range, np, &pp->io);
			pp->io.name = "I/O";
			pp->io.start = max_t(resource_size_t,
					     PCIBIOS_MIN_IO,
					     range.pci_addr + global_io_offset);
			pp->io.end = min_t(resource_size_t,
					   IO_SPACE_LIMIT,
					   range.pci_addr + range.size
					   + global_io_offset);
			pp->config.io_size = resource_size(&pp->io);
			pp->config.io_bus_addr = range.pci_addr;
		}
		if (restype == IORESOURCE_MEM) {
			of_pci_range_to_resource(&range, np, &pp->mem);
			pp->mem.name = "MEM";
			pp->config.mem_size = resource_size(&pp->mem);
			pp->config.mem_bus_addr = range.pci_addr;
		}
		if (restype == 0) {
			of_pci_range_to_resource(&range, np, &pp->cfg);
			pp->config.cfg0_size = resource_size(&pp->cfg)/2;
			pp->config.cfg1_size = resource_size(&pp->cfg)/2;
		}
	}

	if (!pp->dbi_base) {
		pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start,
					resource_size(&pp->cfg));
		if (!pp->dbi_base) {
			dev_err(pp->dev, "error with ioremap\n");
			return -ENOMEM;
		}
	}

	pp->cfg0_base = pp->cfg.start;
	pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
	pp->io_base = pp->io.start;
	pp->mem_base = pp->mem.start;

	pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
					pp->config.cfg0_size);
	if (!pp->va_cfg0_base) {
		dev_err(pp->dev, "error with ioremap in function\n");
		return -ENOMEM;
	}
	pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
					pp->config.cfg1_size);
	if (!pp->va_cfg1_base) {
		dev_err(pp->dev, "error with ioremap\n");
		return -ENOMEM;
	}

	if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {
		dev_err(pp->dev, "Failed to parse the number of lanes\n");
		return -EINVAL;
	}

	if (pp->ops->host_init)
		pp->ops->host_init(pp);

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);

	dw_pci.nr_controllers = 1;
	dw_pci.private_data = (void **)&pp;

	pci_common_init(&dw_pci);
	pci_assign_unassigned_resources();
#ifdef CONFIG_PCI_DOMAINS
	dw_pci.domain++;
#endif

	return 0;
}

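/*
 * Outbound iATU viewport programming.  Only two outbound viewports are used:
 * index 0 is shared between CFG0 and MEM translations, index 1 between CFG1
 * and I/O, so the MEM/I/O windows have to be restored after every foreign
 * configuration access (see dw_pcie_rd_other_conf()/dw_pcie_wr_other_conf()).
 */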
static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
{
	u32 val;
	void __iomem *dbi_base = pp->dbi_base;

	/* Program viewport 0 : OUTBOUND : CFG0 */
	val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0;
	dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
	dw_pcie_writel_rc(pp, pp->cfg0_base, dbi_base + PCIE_ATU_LOWER_BASE);
	dw_pcie_writel_rc(pp, (pp->cfg0_base >> 32),
			  dbi_base + PCIE_ATU_UPPER_BASE);
	dw_pcie_writel_rc(pp, pp->cfg0_base + pp->config.cfg0_size - 1,
			  dbi_base + PCIE_ATU_LIMIT);
	dw_pcie_writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET);
	dw_pcie_writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET);
	dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, dbi_base + PCIE_ATU_CR1);
	val = PCIE_ATU_ENABLE;
	dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
}

static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
{
	u32 val;
	void __iomem *dbi_base = pp->dbi_base;

	/* Program viewport 1 : OUTBOUND : CFG1 */
	val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1;
	dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
	dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, dbi_base + PCIE_ATU_CR1);
	val = PCIE_ATU_ENABLE;
	dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
	dw_pcie_writel_rc(pp, pp->cfg1_base, dbi_base + PCIE_ATU_LOWER_BASE);
	dw_pcie_writel_rc(pp, (pp->cfg1_base >> 32),
			  dbi_base + PCIE_ATU_UPPER_BASE);
	dw_pcie_writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1,
			  dbi_base + PCIE_ATU_LIMIT);
	dw_pcie_writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET);
	dw_pcie_writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET);
}

static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
{
	u32 val;
	void __iomem *dbi_base = pp->dbi_base;

	/* Program viewport 0 : OUTBOUND : MEM */
	val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0;
	dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
	dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, dbi_base + PCIE_ATU_CR1);
	val = PCIE_ATU_ENABLE;
	dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
	dw_pcie_writel_rc(pp, pp->mem_base, dbi_base + PCIE_ATU_LOWER_BASE);
	dw_pcie_writel_rc(pp, (pp->mem_base >> 32),
			  dbi_base + PCIE_ATU_UPPER_BASE);
	dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1,
			  dbi_base + PCIE_ATU_LIMIT);
	dw_pcie_writel_rc(pp, pp->config.mem_bus_addr,
			  dbi_base + PCIE_ATU_LOWER_TARGET);
	dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
			  dbi_base + PCIE_ATU_UPPER_TARGET);
}

static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
{
	u32 val;
	void __iomem *dbi_base = pp->dbi_base;

	/* Program viewport 1 : OUTBOUND : IO */
	val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1;
	dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
	dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, dbi_base + PCIE_ATU_CR1);
	val = PCIE_ATU_ENABLE;
	dw_pcie_writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
	dw_pcie_writel_rc(pp, pp->io_base, dbi_base + PCIE_ATU_LOWER_BASE);
	dw_pcie_writel_rc(pp, (pp->io_base >> 32),
			  dbi_base + PCIE_ATU_UPPER_BASE);
	dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1,
			  dbi_base + PCIE_ATU_LIMIT);
	dw_pcie_writel_rc(pp, pp->config.io_bus_addr,
			  dbi_base + PCIE_ATU_LOWER_TARGET);
	dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
			  dbi_base + PCIE_ATU_UPPER_TARGET);
}

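/*
 * Configuration accesses to devices below the root port.  Type 0 (CFG0)
 * accesses are generated for the bus immediately below the RC, type 1 (CFG1)
 * for buses further downstream; in either case the shared viewport is
 * reprogrammed back to its MEM or I/O mapping once the access completes.
 */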
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
		u32 devfn, int where, int size, u32 *val)
{
	int ret = PCIBIOS_SUCCESSFUL;
	u32 address, busdev;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));
	address = where & ~0x3;

	if (bus->parent->number == pp->root_bus_nr) {
		dw_pcie_prog_viewport_cfg0(pp, busdev);
		ret = cfg_read(pp->va_cfg0_base + address, where, size, val);
		dw_pcie_prog_viewport_mem_outbound(pp);
	} else {
		dw_pcie_prog_viewport_cfg1(pp, busdev);
		ret = cfg_read(pp->va_cfg1_base + address, where, size, val);
		dw_pcie_prog_viewport_io_outbound(pp);
	}

	return ret;
}

static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
		u32 devfn, int where, int size, u32 val)
{
	int ret = PCIBIOS_SUCCESSFUL;
	u32 address, busdev;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));
	address = where & ~0x3;

	if (bus->parent->number == pp->root_bus_nr) {
		dw_pcie_prog_viewport_cfg0(pp, busdev);
		ret = cfg_write(pp->va_cfg0_base + address, where, size, val);
		dw_pcie_prog_viewport_mem_outbound(pp);
	} else {
		dw_pcie_prog_viewport_cfg1(pp, busdev);
		ret = cfg_write(pp->va_cfg1_base + address, where, size, val);
		dw_pcie_prog_viewport_io_outbound(pp);
	}

	return ret;
}

static int dw_pcie_valid_config(struct pcie_port *pp,
				struct pci_bus *bus, int dev)
{
	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
		if (!dw_pcie_link_up(pp))
			return 0;
	}

	/* access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	/*
	 * do not read more than one device on the bus directly attached
	 * to the RC's (virtual bridge's) downstream side
	 */
	if (bus->primary == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}

static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			int size, u32 *val)
{
	struct pcie_port *pp = sys_to_pcie(bus->sysdata);
	unsigned long flags;
	int ret;

	if (!pp) {
		BUG();
		return -EINVAL;
	}

	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	spin_lock_irqsave(&pp->conf_lock, flags);
	if (bus->number != pp->root_bus_nr)
		ret = dw_pcie_rd_other_conf(pp, bus, devfn,
						where, size, val);
	else
		ret = dw_pcie_rd_own_conf(pp, where, size, val);
	spin_unlock_irqrestore(&pp->conf_lock, flags);

	return ret;
}

static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			int where, int size, u32 val)
{
	struct pcie_port *pp = sys_to_pcie(bus->sysdata);
	unsigned long flags;
	int ret;

	if (!pp) {
		BUG();
		return -EINVAL;
	}

	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	spin_lock_irqsave(&pp->conf_lock, flags);
	if (bus->number != pp->root_bus_nr)
		ret = dw_pcie_wr_other_conf(pp, bus, devfn,
						where, size, val);
	else
		ret = dw_pcie_wr_own_conf(pp, where, size, val);
	spin_unlock_irqrestore(&pp->conf_lock, flags);

	return ret;
}

static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};

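/*
 * hw_pci callbacks.  dw_pcie_setup() maps the port's I/O window into the
 * global ARM I/O space (64K per controller, at most 1M in total) and adds
 * the I/O and memory resources with their bus-address offsets.
 */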
int dw_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct pcie_port *pp;

	pp = sys_to_pcie(sys);

	if (!pp)
		return 0;

	if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
		sys->io_offset = global_io_offset - pp->config.io_bus_addr;
		pci_ioremap_io(sys->io_offset, pp->io.start);
		global_io_offset += SZ_64K;
		pci_add_resource_offset(&sys->resources, &pp->io,
					sys->io_offset);
	}

	sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr;
	pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);

	return 1;
}

struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
	struct pci_bus *bus;
	struct pcie_port *pp = sys_to_pcie(sys);

	if (pp) {
		pp->root_bus_nr = sys->busnr;
		bus = pci_scan_root_bus(NULL, sys->busnr, &dw_pcie_ops,
					sys, &sys->resources);
	} else {
		bus = NULL;
		BUG();
	}

	return bus;
}

int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata);

	return pp->irq;
}

static struct hw_pci dw_pci = {
	.setup		= dw_pcie_setup,
	.scan		= dw_pcie_scan_bus,
	.map_irq	= dw_pcie_map_irq,
};

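/*
 * dw_pcie_setup_rc() - program the RC's standard configuration header:
 * lane count, RC BARs, interrupt pin, primary/secondary bus numbers, the
 * non-prefetchable memory window and the command register.
 */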
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	struct pcie_port_info *config = &pp->config;
	void __iomem *dbi_base = pp->dbi_base;
	u32 val;
	u32 membase;
	u32 memlimit;

	/* set the number of lanes */
	dw_pcie_readl_rc(pp, dbi_base + PCIE_PORT_LINK_CONTROL, &val);
	val &= ~PORT_LINK_MODE_MASK;
	switch (pp->lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	}
	dw_pcie_writel_rc(pp, val, dbi_base + PCIE_PORT_LINK_CONTROL);

	/* set link width speed control register */
	dw_pcie_readl_rc(pp, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL, &val);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (pp->lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	}
	dw_pcie_writel_rc(pp, val, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);

	/* setup RC BARs */
	dw_pcie_writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_0);
	dw_pcie_writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_1);

	/* setup interrupt pins */
	dw_pcie_readl_rc(pp, dbi_base + PCI_INTERRUPT_LINE, &val);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_rc(pp, val, dbi_base + PCI_INTERRUPT_LINE);

	/* setup bus numbers */
	dw_pcie_readl_rc(pp, dbi_base + PCI_PRIMARY_BUS, &val);
	val &= 0xff000000;
	val |= 0x00010100;
	dw_pcie_writel_rc(pp, val, dbi_base + PCI_PRIMARY_BUS);

	/* setup memory base, memory limit */
	membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
	memlimit = (config->mem_size + (u32)pp->mem_base) & 0xfff00000;
	val = memlimit | membase;
	dw_pcie_writel_rc(pp, val, dbi_base + PCI_MEMORY_BASE);

	/* setup command register */
	dw_pcie_readl_rc(pp, dbi_base + PCI_COMMAND, &val);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_rc(pp, val, dbi_base + PCI_COMMAND);
}

MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
MODULE_DESCRIPTION("Designware PCIe host controller driver");
MODULE_LICENSE("GPL v2");