blob: fe26df7cf5cd5eebe177b6b6df5e1bca1538c283 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 * PCI Bus Services, see include/linux/pci.h for further explanation.
3 *
4 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
5 * David Mosberger-Tang
6 *
7 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
8 */
9
10#include <linux/kernel.h>
11#include <linux/delay.h>
12#include <linux/init.h>
13#include <linux/pci.h>
David Brownell075c1772007-04-26 00:12:06 -070014#include <linux/pm.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090015#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/module.h>
17#include <linux/spinlock.h>
Tim Schmielau4e57b682005-10-30 15:03:48 -080018#include <linux/string.h>
vignesh babu229f5af2007-08-13 18:23:14 +053019#include <linux/log2.h>
Shaohua Li7d715a62008-02-25 09:46:41 +080020#include <linux/pci-aspm.h>
Stephen Rothwellc300bd2fb2008-07-10 02:16:44 +020021#include <linux/pm_wakeup.h>
Sheng Yang8dd7f802008-10-21 17:38:25 +080022#include <linux/interrupt.h>
Yuji Shimada32a9a682009-03-16 17:13:39 +090023#include <linux/device.h>
Rafael J. Wysockib67ea762010-02-17 23:44:09 +010024#include <linux/pm_runtime.h>
Bjorn Helgaas284f5f92012-04-30 15:21:02 -060025#include <asm-generic/pci-bridge.h>
Yuji Shimada32a9a682009-03-16 17:13:39 +090026#include <asm/setup.h>
Greg KHbc56b9e2005-04-08 14:53:31 +090027#include "pci.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070028
Alan Stern00240c32009-04-27 13:33:16 -040029const char *pci_power_names[] = {
30 "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
31};
32EXPORT_SYMBOL_GPL(pci_power_names);
33
Rafael J. Wysocki93177a72010-01-02 22:57:24 +010034int isa_dma_bridge_buggy;
35EXPORT_SYMBOL(isa_dma_bridge_buggy);
36
37int pci_pci_problems;
38EXPORT_SYMBOL(pci_pci_problems);
39
Rafael J. Wysocki1ae861e2009-12-31 12:15:54 +010040unsigned int pci_pm_d3_delay;
41
Matthew Garrettdf17e622010-10-04 14:22:29 -040042static void pci_pme_list_scan(struct work_struct *work);
43
44static LIST_HEAD(pci_pme_list);
45static DEFINE_MUTEX(pci_pme_list_mutex);
46static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
47
48struct pci_pme_device {
49 struct list_head list;
50 struct pci_dev *dev;
51};
52
53#define PME_TIMEOUT 1000 /* How long between PME checks */
54
Rafael J. Wysocki1ae861e2009-12-31 12:15:54 +010055static void pci_dev_d3_sleep(struct pci_dev *dev)
56{
57 unsigned int delay = dev->d3_delay;
58
59 if (delay < pci_pm_d3_delay)
60 delay = pci_pm_d3_delay;
61
62 msleep(delay);
63}
Linus Torvalds1da177e2005-04-16 15:20:36 -070064
Jeff Garzik32a2eea2007-10-11 16:57:27 -040065#ifdef CONFIG_PCI_DOMAINS
66int pci_domains_supported = 1;
67#endif
68
Atsushi Nemoto4516a612007-02-05 16:36:06 -080069#define DEFAULT_CARDBUS_IO_SIZE (256)
70#define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)
71/* pci=cbmemsize=nnM,cbiosize=nn can override this */
72unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
73unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
74
Eric W. Biederman28760482009-09-09 14:09:24 -070075#define DEFAULT_HOTPLUG_IO_SIZE (256)
76#define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)
77/* pci=hpmemsize=nnM,hpiosize=nn can override this */
78unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
79unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
80
Jon Mason5f39e672011-10-03 09:50:20 -050081enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
Jon Masonb03e7492011-07-20 15:20:54 -050082
Jesse Barnesac1aa472009-10-26 13:20:44 -070083/*
84 * The default CLS is used if arch didn't set CLS explicitly and not
85 * all pci devices agree on the same value. Arch can override either
86 * the dfl or actual value as it sees fit. Don't forget this is
87 * measured in 32-bit words, not bytes.
88 */
Tejun Heo98e724c2009-10-08 18:59:53 +090089u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
Jesse Barnesac1aa472009-10-26 13:20:44 -070090u8 pci_cache_line_size;
91
Myron Stowe96c55902011-10-28 15:48:38 -060092/*
93 * If we set up a device for bus mastering, we need to check the latency
94 * timer as certain BIOSes forget to set it properly.
95 */
96unsigned int pcibios_max_latency = 255;
97
Rafael J. Wysocki6748dcc2012-03-01 00:06:33 +010098/* If set, the PCIe ARI capability will not be used. */
99static bool pcie_ari_disabled;
100
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101/**
102 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
103 * @bus: pointer to PCI bus structure to search
104 *
105 * Given a PCI bus, returns the highest PCI bus number present in the set
106 * including the given PCI bus and its list of child PCI buses.
107 */
Sam Ravnborg96bde062007-03-26 21:53:30 -0800108unsigned char pci_bus_max_busnr(struct pci_bus* bus)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109{
110 struct list_head *tmp;
111 unsigned char max, n;
112
Kristen Accardib82db5c2006-01-17 16:56:56 -0800113 max = bus->subordinate;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700114 list_for_each(tmp, &bus->children) {
115 n = pci_bus_max_busnr(pci_bus_b(tmp));
116 if(n > max)
117 max = n;
118 }
119 return max;
120}
Kristen Accardib82db5c2006-01-17 16:56:56 -0800121EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700122
Andrew Morton1684f5d2008-12-01 14:30:30 -0800123#ifdef CONFIG_HAS_IOMEM
124void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
125{
126 /*
127 * Make sure the BAR is actually a memory resource, not an IO resource
128 */
129 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
130 WARN_ON(1);
131 return NULL;
132 }
133 return ioremap_nocache(pci_resource_start(pdev, bar),
134 pci_resource_len(pdev, bar));
135}
136EXPORT_SYMBOL_GPL(pci_ioremap_bar);
137#endif
138
Kristen Accardib82db5c2006-01-17 16:56:56 -0800139#if 0
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140/**
141 * pci_max_busnr - returns maximum PCI bus number
142 *
143 * Returns the highest PCI bus number present in the system global list of
144 * PCI buses.
145 */
146unsigned char __devinit
147pci_max_busnr(void)
148{
149 struct pci_bus *bus = NULL;
150 unsigned char max, n;
151
152 max = 0;
153 while ((bus = pci_find_next_bus(bus)) != NULL) {
154 n = pci_bus_max_busnr(bus);
155 if(n > max)
156 max = n;
157 }
158 return max;
159}
160
Adrian Bunk54c762f2005-12-22 01:08:52 +0100161#endif /* 0 */
162
Michael Ellerman687d5fe2006-11-22 18:26:18 +1100163#define PCI_FIND_CAP_TTL 48
164
165static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
166 u8 pos, int cap, int *ttl)
Roland Dreier24a4e372005-10-28 17:35:34 -0700167{
168 u8 id;
Roland Dreier24a4e372005-10-28 17:35:34 -0700169
Michael Ellerman687d5fe2006-11-22 18:26:18 +1100170 while ((*ttl)--) {
Roland Dreier24a4e372005-10-28 17:35:34 -0700171 pci_bus_read_config_byte(bus, devfn, pos, &pos);
172 if (pos < 0x40)
173 break;
174 pos &= ~3;
175 pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
176 &id);
177 if (id == 0xff)
178 break;
179 if (id == cap)
180 return pos;
181 pos += PCI_CAP_LIST_NEXT;
182 }
183 return 0;
184}
185
Michael Ellerman687d5fe2006-11-22 18:26:18 +1100186static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
187 u8 pos, int cap)
188{
189 int ttl = PCI_FIND_CAP_TTL;
190
191 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
192}
193
Roland Dreier24a4e372005-10-28 17:35:34 -0700194int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
195{
196 return __pci_find_next_cap(dev->bus, dev->devfn,
197 pos + PCI_CAP_LIST_NEXT, cap);
198}
199EXPORT_SYMBOL_GPL(pci_find_next_capability);
200
Michael Ellermand3bac112006-11-22 18:26:16 +1100201static int __pci_bus_find_cap_start(struct pci_bus *bus,
202 unsigned int devfn, u8 hdr_type)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203{
204 u16 status;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205
206 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
207 if (!(status & PCI_STATUS_CAP_LIST))
208 return 0;
209
210 switch (hdr_type) {
211 case PCI_HEADER_TYPE_NORMAL:
212 case PCI_HEADER_TYPE_BRIDGE:
Michael Ellermand3bac112006-11-22 18:26:16 +1100213 return PCI_CAPABILITY_LIST;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214 case PCI_HEADER_TYPE_CARDBUS:
Michael Ellermand3bac112006-11-22 18:26:16 +1100215 return PCI_CB_CAPABILITY_LIST;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216 default:
217 return 0;
218 }
Michael Ellermand3bac112006-11-22 18:26:16 +1100219
220 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221}
222
223/**
224 * pci_find_capability - query for devices' capabilities
225 * @dev: PCI device to query
226 * @cap: capability code
227 *
228 * Tell if a device supports a given PCI capability.
229 * Returns the address of the requested capability structure within the
230 * device's PCI configuration space or 0 in case the device does not
231 * support it. Possible values for @cap:
232 *
233 * %PCI_CAP_ID_PM Power Management
234 * %PCI_CAP_ID_AGP Accelerated Graphics Port
235 * %PCI_CAP_ID_VPD Vital Product Data
236 * %PCI_CAP_ID_SLOTID Slot Identification
237 * %PCI_CAP_ID_MSI Message Signalled Interrupts
238 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
239 * %PCI_CAP_ID_PCIX PCI-X
240 * %PCI_CAP_ID_EXP PCI Express
241 */
242int pci_find_capability(struct pci_dev *dev, int cap)
243{
Michael Ellermand3bac112006-11-22 18:26:16 +1100244 int pos;
245
246 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
247 if (pos)
248 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
249
250 return pos;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700251}
252
253/**
254 * pci_bus_find_capability - query for devices' capabilities
255 * @bus: the PCI bus to query
256 * @devfn: PCI device to query
257 * @cap: capability code
258 *
259 * Like pci_find_capability() but works for pci devices that do not have a
260 * pci_dev structure set up yet.
261 *
262 * Returns the address of the requested capability structure within the
263 * device's PCI configuration space or 0 in case the device does not
264 * support it.
265 */
266int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
267{
Michael Ellermand3bac112006-11-22 18:26:16 +1100268 int pos;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700269 u8 hdr_type;
270
271 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
272
Michael Ellermand3bac112006-11-22 18:26:16 +1100273 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
274 if (pos)
275 pos = __pci_find_next_cap(bus, devfn, pos, cap);
276
277 return pos;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700278}
279
280/**
Myron Stowec463b8c2012-06-01 15:16:37 -0600281 * pci_pcie_cap2 - query for devices' PCI_CAP_ID_EXP v2 capability structure
282 * @dev: PCI device to check
283 *
284 * Like pci_pcie_cap() but also checks that the PCIe capability version is
285 * >= 2. Note that v1 capability structures could be sparse in that not
286 * all register fields were required. v2 requires the entire structure to
287 * be present size wise, while still allowing for non-implemented registers
288 * to exist but they must be hardwired to 0.
289 *
290 * Due to the differences in the versions of capability structures, one
291 * must be careful not to try and access non-existant registers that may
292 * exist in early versions - v1 - of Express devices.
293 *
294 * Returns the offset of the PCIe capability structure as long as the
295 * capability version is >= 2; otherwise 0 is returned.
296 */
297static int pci_pcie_cap2(struct pci_dev *dev)
298{
299 u16 flags;
300 int pos;
301
302 pos = pci_pcie_cap(dev);
303 if (pos) {
304 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
305 if ((flags & PCI_EXP_FLAGS_VERS) < 2)
306 pos = 0;
307 }
308
309 return pos;
310}
311
312/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313 * pci_find_ext_capability - Find an extended capability
314 * @dev: PCI device to query
315 * @cap: capability code
316 *
317 * Returns the address of the requested extended capability structure
318 * within the device's PCI configuration space or 0 if the device does
319 * not support it. Possible values for @cap:
320 *
321 * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting
322 * %PCI_EXT_CAP_ID_VC Virtual Channel
323 * %PCI_EXT_CAP_ID_DSN Device Serial Number
324 * %PCI_EXT_CAP_ID_PWR Power Budgeting
325 */
326int pci_find_ext_capability(struct pci_dev *dev, int cap)
327{
328 u32 header;
Zhao, Yu557848c2008-10-13 19:18:07 +0800329 int ttl;
330 int pos = PCI_CFG_SPACE_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700331
Zhao, Yu557848c2008-10-13 19:18:07 +0800332 /* minimum 8 bytes per capability */
333 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
334
335 if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336 return 0;
337
338 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
339 return 0;
340
341 /*
342 * If we have no capabilities, this is indicated by cap ID,
343 * cap version and next pointer all being 0.
344 */
345 if (header == 0)
346 return 0;
347
348 while (ttl-- > 0) {
349 if (PCI_EXT_CAP_ID(header) == cap)
350 return pos;
351
352 pos = PCI_EXT_CAP_NEXT(header);
Zhao, Yu557848c2008-10-13 19:18:07 +0800353 if (pos < PCI_CFG_SPACE_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700354 break;
355
356 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
357 break;
358 }
359
360 return 0;
361}
Brice Goglin3a720d72006-05-23 06:10:01 -0400362EXPORT_SYMBOL_GPL(pci_find_ext_capability);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700363
Jesse Barnescf4c43d2009-07-15 13:13:00 -0700364/**
365 * pci_bus_find_ext_capability - find an extended capability
366 * @bus: the PCI bus to query
367 * @devfn: PCI device to query
368 * @cap: capability code
369 *
370 * Like pci_find_ext_capability() but works for pci devices that do not have a
371 * pci_dev structure set up yet.
372 *
373 * Returns the address of the requested capability structure within the
374 * device's PCI configuration space or 0 in case the device does not
375 * support it.
376 */
377int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
378 int cap)
379{
380 u32 header;
381 int ttl;
382 int pos = PCI_CFG_SPACE_SIZE;
383
384 /* minimum 8 bytes per capability */
385 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
386
387 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
388 return 0;
389 if (header == 0xffffffff || header == 0)
390 return 0;
391
392 while (ttl-- > 0) {
393 if (PCI_EXT_CAP_ID(header) == cap)
394 return pos;
395
396 pos = PCI_EXT_CAP_NEXT(header);
397 if (pos < PCI_CFG_SPACE_SIZE)
398 break;
399
400 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
401 break;
402 }
403
404 return 0;
405}
406
Michael Ellerman687d5fe2006-11-22 18:26:18 +1100407static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
408{
409 int rc, ttl = PCI_FIND_CAP_TTL;
410 u8 cap, mask;
411
412 if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
413 mask = HT_3BIT_CAP_MASK;
414 else
415 mask = HT_5BIT_CAP_MASK;
416
417 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
418 PCI_CAP_ID_HT, &ttl);
419 while (pos) {
420 rc = pci_read_config_byte(dev, pos + 3, &cap);
421 if (rc != PCIBIOS_SUCCESSFUL)
422 return 0;
423
424 if ((cap & mask) == ht_cap)
425 return pos;
426
Brice Goglin47a4d5b2007-01-10 23:15:29 -0800427 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
428 pos + PCI_CAP_LIST_NEXT,
Michael Ellerman687d5fe2006-11-22 18:26:18 +1100429 PCI_CAP_ID_HT, &ttl);
430 }
431
432 return 0;
433}
434/**
435 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
436 * @dev: PCI device to query
437 * @pos: Position from which to continue searching
438 * @ht_cap: Hypertransport capability code
439 *
440 * To be used in conjunction with pci_find_ht_capability() to search for
441 * all capabilities matching @ht_cap. @pos should always be a value returned
442 * from pci_find_ht_capability().
443 *
444 * NB. To be 100% safe against broken PCI devices, the caller should take
445 * steps to avoid an infinite loop.
446 */
447int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
448{
449 return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
450}
451EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
452
453/**
454 * pci_find_ht_capability - query a device's Hypertransport capabilities
455 * @dev: PCI device to query
456 * @ht_cap: Hypertransport capability code
457 *
458 * Tell if a device supports a given Hypertransport capability.
459 * Returns an address within the device's PCI configuration space
460 * or 0 in case the device does not support the request capability.
461 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
462 * which has a Hypertransport capability matching @ht_cap.
463 */
464int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
465{
466 int pos;
467
468 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
469 if (pos)
470 pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
471
472 return pos;
473}
474EXPORT_SYMBOL_GPL(pci_find_ht_capability);
475
Linus Torvalds1da177e2005-04-16 15:20:36 -0700476/**
477 * pci_find_parent_resource - return resource region of parent bus of given region
478 * @dev: PCI device structure contains resources to be searched
479 * @res: child resource record for which parent is sought
480 *
481 * For given resource region of given device, return the resource
482 * region of parent bus the given region is contained in or where
483 * it should be allocated from.
484 */
485struct resource *
486pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
487{
488 const struct pci_bus *bus = dev->bus;
489 int i;
Bjorn Helgaas89a74ec2010-02-23 10:24:31 -0700490 struct resource *best = NULL, *r;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700491
Bjorn Helgaas89a74ec2010-02-23 10:24:31 -0700492 pci_bus_for_each_resource(bus, r, i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700493 if (!r)
494 continue;
495 if (res->start && !(res->start >= r->start && res->end <= r->end))
496 continue; /* Not contained */
497 if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
498 continue; /* Wrong type */
499 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
500 return r; /* Exact match */
Linus Torvalds8c8def22009-11-09 12:04:32 -0800501 /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
502 if (r->flags & IORESOURCE_PREFETCH)
503 continue;
504 /* .. but we can put a prefetchable resource inside a non-prefetchable one */
505 if (!best)
506 best = r;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700507 }
508 return best;
509}
510
511/**
John W. Linville064b53d2005-07-27 10:19:44 -0400512 * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
513 * @dev: PCI device to have its BARs restored
514 *
515 * Restore the BAR values for a given device, so as to make it
516 * accessible by its driver.
517 */
Adrian Bunkad6685992007-10-27 03:06:22 +0200518static void
John W. Linville064b53d2005-07-27 10:19:44 -0400519pci_restore_bars(struct pci_dev *dev)
520{
Yu Zhaobc5f5a82008-11-22 02:40:00 +0800521 int i;
John W. Linville064b53d2005-07-27 10:19:44 -0400522
Yu Zhaobc5f5a82008-11-22 02:40:00 +0800523 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
Yu Zhao14add802008-11-22 02:38:52 +0800524 pci_update_resource(dev, i);
John W. Linville064b53d2005-07-27 10:19:44 -0400525}
526
Rafael J. Wysocki961d9122008-07-07 03:32:02 +0200527static struct pci_platform_pm_ops *pci_platform_pm;
528
529int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
530{
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +0200531 if (!ops->is_manageable || !ops->set_state || !ops->choose_state
532 || !ops->sleep_wake || !ops->can_wakeup)
Rafael J. Wysocki961d9122008-07-07 03:32:02 +0200533 return -EINVAL;
534 pci_platform_pm = ops;
535 return 0;
536}
537
538static inline bool platform_pci_power_manageable(struct pci_dev *dev)
539{
540 return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
541}
542
543static inline int platform_pci_set_power_state(struct pci_dev *dev,
544 pci_power_t t)
545{
546 return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
547}
548
549static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
550{
551 return pci_platform_pm ?
552 pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
553}
Randy Dunlap8f7020d2005-10-23 11:57:38 -0700554
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +0200555static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
556{
557 return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
558}
559
560static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
561{
562 return pci_platform_pm ?
563 pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
564}
565
Rafael J. Wysockib67ea762010-02-17 23:44:09 +0100566static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
567{
568 return pci_platform_pm ?
569 pci_platform_pm->run_wake(dev, enable) : -ENODEV;
570}
571
John W. Linville064b53d2005-07-27 10:19:44 -0400572/**
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200573 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
574 * given PCI device
575 * @dev: PCI device to handle.
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200576 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700577 *
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200578 * RETURN VALUE:
579 * -EINVAL if the requested state is invalid.
580 * -EIO if device does not support PCI PM or its PM capabilities register has a
581 * wrong version, or device doesn't support the requested state.
582 * 0 if device already is in the requested state.
583 * 0 if device's power state has been successfully changed.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700584 */
Rafael J. Wysockif00a20e2009-03-16 22:40:08 +0100585static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700586{
Rafael J. Wysocki337001b2008-07-07 03:36:24 +0200587 u16 pmcsr;
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200588 bool need_restore = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700589
Rafael J. Wysocki4a865902009-03-16 22:40:36 +0100590 /* Check if we're already there */
591 if (dev->current_state == state)
592 return 0;
593
Rafael J. Wysocki337001b2008-07-07 03:36:24 +0200594 if (!dev->pm_cap)
Andrew Lunncca03de2007-07-09 11:55:58 -0700595 return -EIO;
596
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200597 if (state < PCI_D0 || state > PCI_D3hot)
598 return -EINVAL;
599
Linus Torvalds1da177e2005-04-16 15:20:36 -0700600 /* Validate current state:
601 * Can enter D0 from any state, but if we can only go deeper
602 * to sleep if we're already in a low power state
603 */
Rafael J. Wysocki4a865902009-03-16 22:40:36 +0100604 if (state != PCI_D0 && dev->current_state <= PCI_D3cold
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200605 && dev->current_state > state) {
Bjorn Helgaas80ccba12008-06-13 10:52:11 -0600606 dev_err(&dev->dev, "invalid power transition "
607 "(from state %d to %d)\n", dev->current_state, state);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700608 return -EINVAL;
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200609 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700610
Linus Torvalds1da177e2005-04-16 15:20:36 -0700611 /* check if this device supports the desired state */
Rafael J. Wysocki337001b2008-07-07 03:36:24 +0200612 if ((state == PCI_D1 && !dev->d1_support)
613 || (state == PCI_D2 && !dev->d2_support))
Daniel Ritz3fe9d192005-08-17 15:32:19 -0700614 return -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615
Rafael J. Wysocki337001b2008-07-07 03:36:24 +0200616 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
John W. Linville064b53d2005-07-27 10:19:44 -0400617
John W. Linville32a36582005-09-14 09:52:42 -0400618 /* If we're (effectively) in D3, force entire word to 0.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700619 * This doesn't affect PME_Status, disables PME_En, and
620 * sets PowerState to 0.
621 */
John W. Linville32a36582005-09-14 09:52:42 -0400622 switch (dev->current_state) {
John W. Linvilled3535fb2005-09-28 17:50:51 -0400623 case PCI_D0:
624 case PCI_D1:
625 case PCI_D2:
626 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
627 pmcsr |= state;
628 break;
Rafael J. Wysockif62795f2009-05-18 22:51:12 +0200629 case PCI_D3hot:
630 case PCI_D3cold:
John W. Linville32a36582005-09-14 09:52:42 -0400631 case PCI_UNKNOWN: /* Boot-up */
632 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
Rafael J. Wysockif00a20e2009-03-16 22:40:08 +0100633 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200634 need_restore = true;
John W. Linville32a36582005-09-14 09:52:42 -0400635 /* Fall-through: force to D0 */
John W. Linville32a36582005-09-14 09:52:42 -0400636 default:
John W. Linvilled3535fb2005-09-28 17:50:51 -0400637 pmcsr = 0;
John W. Linville32a36582005-09-14 09:52:42 -0400638 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700639 }
640
641 /* enter specified state */
Rafael J. Wysocki337001b2008-07-07 03:36:24 +0200642 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700643
644 /* Mandatory power management transition delays */
645 /* see PCI PM 1.1 5.6.1 table 18 */
646 if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
Rafael J. Wysocki1ae861e2009-12-31 12:15:54 +0100647 pci_dev_d3_sleep(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700648 else if (state == PCI_D2 || dev->current_state == PCI_D2)
Rafael J. Wysockiaa8c6c92009-01-16 21:54:43 +0100649 udelay(PCI_PM_D2_DELAY);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700650
Rafael J. Wysockie13cdbd2009-10-05 00:48:40 +0200651 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
652 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
653 if (dev->current_state != state && printk_ratelimit())
654 dev_info(&dev->dev, "Refused to change power state, "
655 "currently in D%d\n", dev->current_state);
John W. Linville064b53d2005-07-27 10:19:44 -0400656
657 /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
658 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
659 * from D3hot to D0 _may_ perform an internal reset, thereby
660 * going to "D0 Uninitialized" rather than "D0 Initialized".
661 * For example, at least some versions of the 3c905B and the
662 * 3c556B exhibit this behaviour.
663 *
664 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
665 * devices in a D3hot state at boot. Consequently, we need to
666 * restore at least the BARs so that the device will be
667 * accessible to its driver.
668 */
669 if (need_restore)
670 pci_restore_bars(dev);
671
Rafael J. Wysockif00a20e2009-03-16 22:40:08 +0100672 if (dev->bus->self)
Shaohua Li7d715a62008-02-25 09:46:41 +0800673 pcie_aspm_pm_state_change(dev->bus->self);
674
Linus Torvalds1da177e2005-04-16 15:20:36 -0700675 return 0;
676}
677
678/**
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200679 * pci_update_current_state - Read PCI power state of given device from its
680 * PCI PM registers and cache it
681 * @dev: PCI device to handle.
Rafael J. Wysockif06fc0b2008-12-27 16:30:52 +0100682 * @state: State to cache in case the device doesn't have the PM capability
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200683 */
Rafael J. Wysocki73410422009-01-07 13:07:15 +0100684void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200685{
Rafael J. Wysocki337001b2008-07-07 03:36:24 +0200686 if (dev->pm_cap) {
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200687 u16 pmcsr;
688
Rafael J. Wysocki337001b2008-07-07 03:36:24 +0200689 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200690 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
Rafael J. Wysockif06fc0b2008-12-27 16:30:52 +0100691 } else {
692 dev->current_state = state;
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200693 }
694}
695
696/**
Rafael J. Wysocki0e5dd462009-03-26 22:51:40 +0100697 * pci_platform_power_transition - Use platform to change device power state
698 * @dev: PCI device to handle.
699 * @state: State to put the device into.
700 */
701static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
702{
703 int error;
704
705 if (platform_pci_power_manageable(dev)) {
706 error = platform_pci_set_power_state(dev, state);
707 if (!error)
708 pci_update_current_state(dev, state);
Ajaykumar Hotchandanib51306c2011-12-12 13:57:36 +0530709 /* Fall back to PCI_D0 if native PM is not supported */
710 if (!dev->pm_cap)
711 dev->current_state = PCI_D0;
Rafael J. Wysocki0e5dd462009-03-26 22:51:40 +0100712 } else {
713 error = -ENODEV;
714 /* Fall back to PCI_D0 if native PM is not supported */
Rafael J. Wysockib3bad722009-05-17 20:17:06 +0200715 if (!dev->pm_cap)
716 dev->current_state = PCI_D0;
Rafael J. Wysocki0e5dd462009-03-26 22:51:40 +0100717 }
718
719 return error;
720}
721
722/**
723 * __pci_start_power_transition - Start power transition of a PCI device
724 * @dev: PCI device to handle.
725 * @state: State to put the device into.
726 */
727static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
728{
729 if (state == PCI_D0)
730 pci_platform_power_transition(dev, PCI_D0);
731}
732
733/**
734 * __pci_complete_power_transition - Complete power transition of a PCI device
735 * @dev: PCI device to handle.
736 * @state: State to put the device into.
737 *
738 * This function should not be called directly by device drivers.
739 */
740int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
741{
Matthew Garrettcc2893b2010-04-22 09:30:51 -0400742 return state >= PCI_D0 ?
Rafael J. Wysocki0e5dd462009-03-26 22:51:40 +0100743 pci_platform_power_transition(dev, state) : -EINVAL;
744}
745EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
746
747/**
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200748 * pci_set_power_state - Set the power state of a PCI device
749 * @dev: PCI device to handle.
750 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
751 *
Nick Andrew877d0312009-01-26 11:06:57 +0100752 * Transition a device to a new power state, using the platform firmware and/or
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200753 * the device's PCI PM registers.
754 *
755 * RETURN VALUE:
756 * -EINVAL if the requested state is invalid.
757 * -EIO if device does not support PCI PM or its PM capabilities register has a
758 * wrong version, or device doesn't support the requested state.
759 * 0 if device already is in the requested state.
760 * 0 if device's power state has been successfully changed.
761 */
762int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
763{
Rafael J. Wysocki337001b2008-07-07 03:36:24 +0200764 int error;
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200765
766 /* bound the state we're entering */
767 if (state > PCI_D3hot)
768 state = PCI_D3hot;
769 else if (state < PCI_D0)
770 state = PCI_D0;
771 else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
772 /*
773 * If the device or the parent bridge do not support PCI PM,
774 * ignore the request if we're doing anything other than putting
775 * it into D0 (which would only happen on boot).
776 */
777 return 0;
778
Rafael J. Wysocki0e5dd462009-03-26 22:51:40 +0100779 __pci_start_power_transition(dev, state);
780
Alan Cox979b1792008-07-24 17:18:38 +0100781 /* This device is quirked not to be put into D3, so
782 don't put it in D3 */
783 if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
784 return 0;
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200785
Rafael J. Wysockif00a20e2009-03-16 22:40:08 +0100786 error = pci_raw_set_power_state(dev, state);
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200787
Rafael J. Wysocki0e5dd462009-03-26 22:51:40 +0100788 if (!__pci_complete_power_transition(dev, state))
789 error = 0;
Naga Chumbalkar1a680b72011-03-21 03:29:08 +0000790 /*
791 * When aspm_policy is "powersave" this call ensures
792 * that ASPM is configured.
793 */
794 if (!error && dev->bus->self)
795 pcie_aspm_powersave_config_link(dev->bus->self);
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200796
797 return error;
798}
799
800/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700801 * pci_choose_state - Choose the power state of a PCI device
802 * @dev: PCI device to be suspended
803 * @state: target sleep state for the whole system. This is the value
804 * that is passed to suspend() function.
805 *
806 * Returns PCI power state suitable for given device and given system
807 * message.
808 */
809
810pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
811{
Shaohua Liab826ca2007-07-20 10:03:22 +0800812 pci_power_t ret;
David Shaohua Li0f644742005-03-19 00:15:48 -0500813
Linus Torvalds1da177e2005-04-16 15:20:36 -0700814 if (!pci_find_capability(dev, PCI_CAP_ID_PM))
815 return PCI_D0;
816
Rafael J. Wysocki961d9122008-07-07 03:32:02 +0200817 ret = platform_pci_choose_state(dev);
818 if (ret != PCI_POWER_ERROR)
819 return ret;
Pavel Machekca078ba2005-09-03 15:56:57 -0700820
821 switch (state.event) {
822 case PM_EVENT_ON:
823 return PCI_D0;
824 case PM_EVENT_FREEZE:
David Brownellb887d2e2006-08-14 23:11:05 -0700825 case PM_EVENT_PRETHAW:
826 /* REVISIT both freeze and pre-thaw "should" use D0 */
Pavel Machekca078ba2005-09-03 15:56:57 -0700827 case PM_EVENT_SUSPEND:
Rafael J. Wysocki3a2d5b72008-02-23 19:13:25 +0100828 case PM_EVENT_HIBERNATE:
Pavel Machekca078ba2005-09-03 15:56:57 -0700829 return PCI_D3hot;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830 default:
Bjorn Helgaas80ccba12008-06-13 10:52:11 -0600831 dev_info(&dev->dev, "unrecognized suspend event %d\n",
832 state.event);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700833 BUG();
834 }
835 return PCI_D0;
836}
837
838EXPORT_SYMBOL(pci_choose_state);
839
/* Number of 16-bit PCIe control registers saved per device (see
 * pci_save_pcie_state(): DEVCTL, LNKCTL, SLTCTL, RTCTL, DEVCTL2,
 * LNKCTL2, SLTCTL2). */
#define PCI_EXP_SAVE_REGS	7

/*
 * Predicates telling, from the device/port type and the PCIe capability
 * FLAGS register, which control registers exist on a given device.
 * Capability version 2 (and later) devices implement all of them;
 * version 1 devices only implement a type-dependent subset.
 * The Device Control register always exists.
 */
#define pcie_cap_has_devctl(type, flags)	1
#define pcie_cap_has_lnkctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_ENDPOINT ||	\
		  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
		   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags)			\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_RC_EC))
Yu Zhao1b6b8ce2009-04-09 14:57:39 +0800857
Yinghai Lu34a48762012-02-11 00:18:41 -0800858static struct pci_cap_saved_state *pci_find_saved_cap(
859 struct pci_dev *pci_dev, char cap)
860{
861 struct pci_cap_saved_state *tmp;
862 struct hlist_node *pos;
863
864 hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
865 if (tmp->cap.cap_nr == cap)
866 return tmp;
867 }
868 return NULL;
869}
870
/*
 * Save the PCI Express capability control registers into the buffer
 * pre-allocated for PCI_CAP_ID_EXP.  The registers are packed
 * back-to-back into cap[] in a fixed order, so this MUST stay in sync
 * with pci_restore_pcie_state(), which writes them back with the same
 * sequence of i++ accesses.
 *
 * Returns 0 on success (including "no PCIe capability, nothing to do")
 * or -ENOMEM if the save buffer was never allocated.
 */
static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->cap.data[0];

	/* FLAGS tells us the capability version and slot presence, which
	 * determine which control registers exist on this device. */
	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);

	/* v2-only registers; absent pci_pcie_cap2() means nothing more. */
	pos = pci_pcie_cap2(dev);
	if (!pos)
		return 0;

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
	return 0;
}
909
/*
 * Write the PCI Express control registers saved by pci_save_pcie_state()
 * back to the device.  The cap[] consumption order (the sequence of i++
 * accesses, gated by the same pcie_cap_has_* predicates) MUST exactly
 * mirror the save path, since the values are packed with no per-register
 * tags.
 */
static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	/* Re-read FLAGS to evaluate the same predicates as the save path. */
	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);

	/* v2-only registers, present iff pci_pcie_cap2() found them. */
	pos = pci_pcie_cap2(dev);
	if (!pos)
		return;

	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}
942
Stephen Hemmingercc692a52006-11-08 16:17:15 -0800943
944static int pci_save_pcix_state(struct pci_dev *dev)
945{
Rafael J. Wysocki63f48982008-12-07 22:02:58 +0100946 int pos;
Stephen Hemmingercc692a52006-11-08 16:17:15 -0800947 struct pci_cap_saved_state *save_state;
Stephen Hemmingercc692a52006-11-08 16:17:15 -0800948
949 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
950 if (pos <= 0)
951 return 0;
952
Shaohua Lif34303d2007-12-18 09:56:47 +0800953 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
Stephen Hemmingercc692a52006-11-08 16:17:15 -0800954 if (!save_state) {
Harvey Harrisone496b612009-01-07 16:22:37 -0800955 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
Stephen Hemmingercc692a52006-11-08 16:17:15 -0800956 return -ENOMEM;
957 }
Stephen Hemmingercc692a52006-11-08 16:17:15 -0800958
Alex Williamson24a47422011-05-10 10:02:11 -0600959 pci_read_config_word(dev, pos + PCI_X_CMD,
960 (u16 *)save_state->cap.data);
Rafael J. Wysocki63f48982008-12-07 22:02:58 +0100961
Stephen Hemmingercc692a52006-11-08 16:17:15 -0800962 return 0;
963}
964
965static void pci_restore_pcix_state(struct pci_dev *dev)
966{
967 int i = 0, pos;
968 struct pci_cap_saved_state *save_state;
969 u16 *cap;
970
971 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
972 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
973 if (!save_state || pos <= 0)
974 return;
Alex Williamson24a47422011-05-10 10:02:11 -0600975 cap = (u16 *)&save_state->cap.data[0];
Stephen Hemmingercc692a52006-11-08 16:17:15 -0800976
977 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
Stephen Hemmingercc692a52006-11-08 16:17:15 -0800978}
979
980
Linus Torvalds1da177e2005-04-16 15:20:36 -0700981/**
982 * pci_save_state - save the PCI configuration space of a device before suspending
983 * @dev: - PCI device that we're dealing with
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984 */
985int
986pci_save_state(struct pci_dev *dev)
987{
988 int i;
989 /* XXX: 100% dword access ok here? */
990 for (i = 0; i < 16; i++)
Kleber Sacilotto de Souza9e0b5b22009-11-25 00:55:51 -0200991 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
Rafael J. Wysockiaa8c6c92009-01-16 21:54:43 +0100992 dev->state_saved = true;
Michael S. Tsirkinb56a5a22006-08-21 16:22:22 +0300993 if ((i = pci_save_pcie_state(dev)) != 0)
994 return i;
Stephen Hemmingercc692a52006-11-08 16:17:15 -0800995 if ((i = pci_save_pcix_state(dev)) != 0)
996 return i;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997 return 0;
998}
999
Rafael J. Wysockiebfc5b82012-04-15 21:40:40 +02001000static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1001 u32 saved_val, int retry)
1002{
1003 u32 val;
1004
1005 pci_read_config_dword(pdev, offset, &val);
1006 if (val == saved_val)
1007 return;
1008
1009 for (;;) {
1010 dev_dbg(&pdev->dev, "restoring config space at offset "
1011 "%#x (was %#x, writing %#x)\n", offset, val, saved_val);
1012 pci_write_config_dword(pdev, offset, saved_val);
1013 if (retry-- <= 0)
1014 return;
1015
1016 pci_read_config_dword(pdev, offset, &val);
1017 if (val == saved_val)
1018 return;
1019
1020 mdelay(1);
1021 }
1022}
1023
Rafael J. Wysockia6cb9ee2012-04-16 23:07:50 +02001024static void pci_restore_config_space_range(struct pci_dev *pdev,
1025 int start, int end, int retry)
Rafael J. Wysockiebfc5b82012-04-15 21:40:40 +02001026{
1027 int index;
1028
1029 for (index = end; index >= start; index--)
1030 pci_restore_config_dword(pdev, 4 * index,
1031 pdev->saved_config_space[index],
1032 retry);
1033}
1034
Rafael J. Wysockia6cb9ee2012-04-16 23:07:50 +02001035static void pci_restore_config_space(struct pci_dev *pdev)
1036{
1037 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1038 pci_restore_config_space_range(pdev, 10, 15, 0);
1039 /* Restore BARs before the command register. */
1040 pci_restore_config_space_range(pdev, 4, 9, 10);
1041 pci_restore_config_space_range(pdev, 0, 3, 0);
1042 } else {
1043 pci_restore_config_space_range(pdev, 0, 15, 0);
1044 }
1045}
1046
/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 *
 * No-op unless pci_save_state() was called first.  The restore order
 * below is deliberate and must not be rearranged: PCIe registers first,
 * then ATS, then the generic 64-byte header (BARs before the command
 * register), then PCI-X, MSI and SR-IOV state.  The saved state is
 * consumed: @dev->state_saved is cleared on return.
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
1068
/* Opaque snapshot handed to drivers by pci_store_saved_state(). */
struct pci_saved_state {
	u32 config_space[16];		/* first 64 bytes of config space */
	struct pci_cap_saved_data cap[0];	/* packed per-capability data,
						 * terminated by an entry with
						 * size == 0 */
};
1073
/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.  The caller owns the returned
 * buffer and eventually frees it (e.g. via pci_load_and_free_saved_state()).
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	struct hlist_node *pos;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	/* Base struct plus one empty pci_cap_saved_data as terminator. */
	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	/* kzalloc guarantees the terminating entry has size == 0. */
	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	/* Pack each capability's header+data back-to-back. */
	cap = state->cap;
	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);
1115
/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 *
 * Copies the snapshot back into @dev's internal save buffers.  A NULL
 * @state just clears @dev->state_saved and returns 0.  Returns -EINVAL
 * if a capability entry in @state has no matching (same-sized) buffer
 * on @dev; in that case @dev->state_saved stays false.
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	/* Walk the packed capability entries up to the size==0 terminator. */
	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = pci_find_saved_cap(dev, cap->cap_nr);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		      sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);
1150
1151/**
1152 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1153 * and free the memory allocated for it.
1154 * @dev: PCI device that we're dealing with
1155 * @state: Pointer to saved state returned from pci_store_saved_state()
1156 */
1157int pci_load_and_free_saved_state(struct pci_dev *dev,
1158 struct pci_saved_state **state)
1159{
1160 int ret = pci_load_saved_state(dev, *state);
1161 kfree(*state);
1162 *state = NULL;
1163 return ret;
1164}
1165EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
1166
Hidetoshi Seto38cc1302006-12-18 10:30:00 +09001167static int do_pci_enable_device(struct pci_dev *dev, int bars)
1168{
1169 int err;
1170
1171 err = pci_set_power_state(dev, PCI_D0);
1172 if (err < 0 && err != -EIO)
1173 return err;
1174 err = pcibios_enable_device(dev, bars);
1175 if (err < 0)
1176 return err;
1177 pci_fixup_device(pci_fixup_enable, dev);
1178
1179 return 0;
1180}
1181
1182/**
Tejun Heo0b62e132007-07-27 14:43:35 +09001183 * pci_reenable_device - Resume abandoned device
Hidetoshi Seto38cc1302006-12-18 10:30:00 +09001184 * @dev: PCI device to be resumed
1185 *
1186 * Note this function is a backend of pci_default_resume and is not supposed
1187 * to be called by normal code, write proper resume handler and use it instead.
1188 */
Tejun Heo0b62e132007-07-27 14:43:35 +09001189int pci_reenable_device(struct pci_dev *dev)
Hidetoshi Seto38cc1302006-12-18 10:30:00 +09001190{
Yuji Shimada296ccb02009-04-03 16:41:46 +09001191 if (pci_is_enabled(dev))
Hidetoshi Seto38cc1302006-12-18 10:30:00 +09001192 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1193 return 0;
1194}
1195
/*
 * Shared backend for pci_enable_device{,_io,_mem}(): refcounted enable
 * of all device resources whose flags intersect @flags.  Only the first
 * caller (enable_cnt 0 -> 1) actually touches the hardware.
 */
static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call. So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	/* Atomically claim the enable; only the 0 -> 1 transition proceeds. */
	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	/* only skip sriov related */
	/* Build the BAR mask in two loops so the SR-IOV resource slots
	 * (between PCI_ROM_RESOURCE and PCI_BRIDGE_RESOURCES) are skipped. */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		/* Undo our claim so a later caller can retry the enable. */
		atomic_dec(&dev->enable_cnt);
	return err;
}
1230
1231/**
Benjamin Herrenschmidtb7189892007-12-20 15:28:08 +11001232 * pci_enable_device_io - Initialize a device for use with IO space
1233 * @dev: PCI device to be initialized
1234 *
1235 * Initialize device before it's used by a driver. Ask low-level code
1236 * to enable I/O resources. Wake up the device if it was suspended.
1237 * Beware, this function can fail.
1238 */
1239int pci_enable_device_io(struct pci_dev *dev)
1240{
1241 return __pci_enable_device_flags(dev, IORESOURCE_IO);
1242}
1243
1244/**
1245 * pci_enable_device_mem - Initialize a device for use with Memory space
1246 * @dev: PCI device to be initialized
1247 *
1248 * Initialize device before it's used by a driver. Ask low-level code
1249 * to enable Memory resources. Wake up the device if it was suspended.
1250 * Beware, this function can fail.
1251 */
1252int pci_enable_device_mem(struct pci_dev *dev)
1253{
1254 return __pci_enable_device_flags(dev, IORESOURCE_MEM);
1255}
1256
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257/**
1258 * pci_enable_device - Initialize device before it's used by a driver.
1259 * @dev: PCI device to be initialized
1260 *
1261 * Initialize device before it's used by a driver. Ask low-level code
1262 * to enable I/O and memory. Wake up the device if it was suspended.
1263 * Beware, this function can fail.
Inaky Perez-Gonzalezbae94d02006-11-22 12:40:31 -08001264 *
1265 * Note we don't actually enable the device many times if we call
1266 * this function repeatedly (we just increment the count).
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267 */
Inaky Perez-Gonzalezbae94d02006-11-22 12:40:31 -08001268int pci_enable_device(struct pci_dev *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269{
Benjamin Herrenschmidtb7189892007-12-20 15:28:08 +11001270 return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271}
1272
/*
 * Managed PCI resources. This manages device on/off, intx/msi/msix
 * on/off and BAR regions. pci_dev itself records msi/msix status, so
 * there's no need to track it separately. pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;		/* device enabled via pcim interface */
	unsigned int pinned:1;		/* don't disable on driver detach */
	unsigned int orig_intx:1;	/* INTx state to restore; presumably
					 * recorded by a managed pci_intx()
					 * caller elsewhere — verify */
	unsigned int restore_intx:1;	/* restore orig_intx on release */
	u32 region_mask;		/* bit i set => BAR i claimed via
					 * the managed interface */
};
1286
/*
 * Devres release callback: undo everything the managed PCI interface
 * did for the device, in reverse order of setup — disable MSI/MSI-X,
 * release claimed regions, restore INTx, and finally disable the device
 * unless it was pinned with pcim_pin_device().
 */
static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	/* Release every BAR region this interface claimed. */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	/* Pinned devices stay enabled across driver detach. */
	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}
1308
1309static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
1310{
1311 struct pci_devres *dr, *new_dr;
1312
1313 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1314 if (dr)
1315 return dr;
1316
1317 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1318 if (!new_dr)
1319 return NULL;
1320 return devres_get(&pdev->dev, new_dr, NULL, NULL);
1321}
1322
1323static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
1324{
1325 if (pci_is_managed(pdev))
1326 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1327 return NULL;
1328}
1329
1330/**
1331 * pcim_enable_device - Managed pci_enable_device()
1332 * @pdev: PCI device to be initialized
1333 *
1334 * Managed pci_enable_device().
1335 */
1336int pcim_enable_device(struct pci_dev *pdev)
1337{
1338 struct pci_devres *dr;
1339 int rc;
1340
1341 dr = get_pci_dr(pdev);
1342 if (unlikely(!dr))
1343 return -ENOMEM;
Tejun Heob95d58e2008-01-30 18:20:04 +09001344 if (dr->enabled)
1345 return 0;
Tejun Heo9ac78492007-01-20 16:00:26 +09001346
1347 rc = pci_enable_device(pdev);
1348 if (!rc) {
1349 pdev->is_managed = 1;
Tejun Heo7f375f32007-02-25 04:36:01 -08001350 dr->enabled = 1;
Tejun Heo9ac78492007-01-20 16:00:26 +09001351 }
1352 return rc;
1353}
1354
1355/**
1356 * pcim_pin_device - Pin managed PCI device
1357 * @pdev: PCI device to pin
1358 *
1359 * Pin managed PCI device @pdev. Pinned device won't be disabled on
1360 * driver detach. @pdev must have been enabled with
1361 * pcim_enable_device().
1362 */
1363void pcim_pin_device(struct pci_dev *pdev)
1364{
1365 struct pci_devres *dr;
1366
1367 dr = find_pci_dr(pdev);
Tejun Heo7f375f32007-02-25 04:36:01 -08001368 WARN_ON(!dr || !dr->enabled);
Tejun Heo9ac78492007-01-20 16:00:26 +09001369 if (dr)
Tejun Heo7f375f32007-02-25 04:36:01 -08001370 dr->pinned = 1;
Tejun Heo9ac78492007-01-20 16:00:26 +09001371}
1372
/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
/* Weak default: no arch-specific teardown needed. */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
1382
Rafael J. Wysockifa58d302009-01-07 13:03:42 +01001383static void do_pci_disable_device(struct pci_dev *dev)
1384{
1385 u16 pci_command;
1386
1387 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1388 if (pci_command & PCI_COMMAND_MASTER) {
1389 pci_command &= ~PCI_COMMAND_MASTER;
1390 pci_write_config_word(dev, PCI_COMMAND, pci_command);
1391 }
1392
1393 pcibios_disable_device(dev);
1394}
1395
/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (!pci_is_enabled(dev))
		return;
	do_pci_disable_device(dev);
}
1408
/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore. This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	/* Keep the managed-resource bookkeeping in sync. */
	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	/* Only the caller that drops enable_cnt to zero really disables. */
	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
1435
/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
/* Weak default: report "not supported" unless the arch overrides it. */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}
1450
1451/**
1452 * pci_set_pcie_reset_state - set reset state for device dev
Stefan Assmann45e829e2009-12-03 06:49:24 -05001453 * @dev: the PCIe device reset
Brian Kingf7bdd122007-04-06 16:39:36 -05001454 * @state: Reset state to enter into
1455 *
1456 *
1457 * Sets the PCI reset state for the device.
1458 */
1459int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1460{
1461 return pcibios_set_pcie_reset_state(dev, state);
1462}
1463
1464/**
Rafael J. Wysocki58ff4632010-02-17 23:36:58 +01001465 * pci_check_pme_status - Check if given device has generated PME.
1466 * @dev: Device to check.
1467 *
1468 * Check the PME status of the device and if set, clear it and clear PME enable
1469 * (if set). Return 'true' if PME status and PME enable were both set or
1470 * 'false' otherwise.
1471 */
1472bool pci_check_pme_status(struct pci_dev *dev)
1473{
1474 int pmcsr_pos;
1475 u16 pmcsr;
1476 bool ret = false;
1477
1478 if (!dev->pm_cap)
1479 return false;
1480
1481 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1482 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1483 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1484 return false;
1485
1486 /* Clear PME status. */
1487 pmcsr |= PCI_PM_CTRL_PME_STATUS;
1488 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1489 /* Disable PME to avoid interrupt flood. */
1490 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1491 ret = true;
1492 }
1493
1494 pci_write_config_word(dev, pmcsr_pos, pmcsr);
1495
1496 return ret;
1497}
1498
1499/**
Rafael J. Wysockib67ea762010-02-17 23:44:09 +01001500 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1501 * @dev: Device to handle.
Rafael J. Wysocki379021d2011-10-03 23:16:33 +02001502 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
Rafael J. Wysockib67ea762010-02-17 23:44:09 +01001503 *
1504 * Check if @dev has generated PME and queue a resume request for it in that
1505 * case.
1506 */
Rafael J. Wysocki379021d2011-10-03 23:16:33 +02001507static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
Rafael J. Wysockib67ea762010-02-17 23:44:09 +01001508{
Rafael J. Wysocki379021d2011-10-03 23:16:33 +02001509 if (pme_poll_reset && dev->pme_poll)
1510 dev->pme_poll = false;
1511
Rafael J. Wysockic125e962010-07-05 22:43:53 +02001512 if (pci_check_pme_status(dev)) {
Rafael J. Wysockic125e962010-07-05 22:43:53 +02001513 pci_wakeup_event(dev);
Rafael J. Wysocki0f953bf2010-12-29 13:22:08 +01001514 pm_request_resume(&dev->dev);
Rafael J. Wysockic125e962010-07-05 22:43:53 +02001515 }
Rafael J. Wysockib67ea762010-02-17 23:44:09 +01001516 return 0;
1517}
1518
1519/**
1520 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1521 * @bus: Top bus of the subtree to walk.
1522 */
1523void pci_pme_wakeup_bus(struct pci_bus *bus)
1524{
1525 if (bus)
Rafael J. Wysocki379021d2011-10-03 23:16:33 +02001526 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
Rafael J. Wysockib67ea762010-02-17 23:44:09 +01001527}
1528
1529/**
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +02001530 * pci_pme_capable - check the capability of PCI device to generate PME#
1531 * @dev: PCI device to handle.
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +02001532 * @state: PCI state from which device will issue PME#.
1533 */
Rafael J. Wysockie5899e12008-07-19 14:39:24 +02001534bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +02001535{
Rafael J. Wysocki337001b2008-07-07 03:36:24 +02001536 if (!dev->pm_cap)
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +02001537 return false;
1538
Rafael J. Wysocki337001b2008-07-07 03:36:24 +02001539 return !!(dev->pme_support & (1 << state));
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +02001540}
1541
/*
 * Delayed-work handler that polls devices which rely on PME polling
 * (dev->pme_poll) instead of a working PME# interrupt.  Entries whose
 * pme_poll flag has been cleared are dropped from the list; the work
 * reschedules itself while the list remains non-empty.  The safe list
 * iterator is required because entries are deleted during the walk;
 * all list manipulation happens under pci_pme_list_mutex.
 */
static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	if (!list_empty(&pci_pme_list)) {
		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
			if (pme_dev->dev->pme_poll) {
				pci_pme_wakeup(pme_dev->dev, NULL);
			} else {
				list_del(&pme_dev->list);
				kfree(pme_dev);
			}
		}
		if (!list_empty(&pci_pme_list))
			schedule_delayed_work(&pci_pme_work,
					      msecs_to_jiffies(PME_TIMEOUT));
	}
	mutex_unlock(&pci_pme_list_mutex);
}
1562
/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* PCI (as opposed to PCIe) PME requires that the device have
	   its PME# line hooked up correctly. Not all hardware vendors
	   do this, so the PME never gets delivered and the device
	   remains asleep. The easiest way around this is to
	   periodically walk the list of suspended devices and check
	   whether any have their PME flag set. The assumption is that
	   we'll wake up often enough anyway that this won't be a huge
	   hit, and the power savings from the devices will still be a
	   win. */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			/* Add the device to the polled list and kick the
			 * poller if this is the first entry. */
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev)
				goto out;	/* best effort: just skip polling */
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				schedule_delayed_work(&pci_pme_work,
						      msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			/* Remove the device from the polled list, if present. */
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

out:
	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
1626
/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involves platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	/* System wakeup must be allowed by user space; runtime wakeup is
	   not subject to that policy. */
	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable. To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		/* ret == 1 marks "native PME# unusable"; in that case the
		   platform hook's result decides the outcome. */
		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = runtime ? platform_pci_run_wake(dev, true) :
			platform_pci_sleep_wake(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		/* Platform first, then native PME#, for symmetry (see above). */
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +02001690
1691/**
Rafael J. Wysocki0235c4f2008-08-18 21:38:00 +02001692 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1693 * @dev: PCI device to prepare
1694 * @enable: True to enable wake-up event generation; false to disable
1695 *
1696 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1697 * and this function allows them to set that up cleanly - pci_enable_wake()
1698 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1699 * ordering constraints.
1700 *
1701 * This function only returns error code if the device is not capable of
1702 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1703 * enable wake-up power for it.
1704 */
1705int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1706{
1707 return pci_pme_capable(dev, PCI_D3cold) ?
1708 pci_enable_wake(dev, PCI_D3cold, enable) :
1709 pci_enable_wake(dev, PCI_D3hot, enable);
1710}
1711
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			/* Platform gave no usable answer; keep D3hot. */
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
			/* fall through: D1/D2 allowed, adopt platform's choice */
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		/* No PM capability at all: D0 is the only safe state. */
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			       && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}
1759
1760/**
1761 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1762 * @dev: Device to handle.
1763 *
1764 * Choose the power state appropriate for the device depending on whether
1765 * it can wake up the system and/or is power manageable by the platform
1766 * (PCI_D3hot is the default) and put the device into that state.
1767 */
1768int pci_prepare_to_sleep(struct pci_dev *dev)
1769{
1770 pci_power_t target_state = pci_target_state(dev);
1771 int error;
1772
1773 if (target_state == PCI_POWER_ERROR)
1774 return -EIO;
1775
Rafael J. Wysocki8efb8c72009-03-30 21:46:27 +02001776 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
Rafael J. Wysockic157dfa2008-07-13 22:45:06 +02001777
Rafael J. Wysocki404cc2d2008-07-07 03:35:26 +02001778 error = pci_set_power_state(dev, target_state);
1779
1780 if (error)
1781 pci_enable_wake(dev, target_state, false);
1782
1783 return error;
1784}
1785
/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	/* Disarm wake-up first, then return the device to full power. */
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
1797
1798/**
Rafael J. Wysocki6cbf8212010-02-17 23:44:58 +01001799 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1800 * @dev: PCI device being suspended.
1801 *
1802 * Prepare @dev to generate wake-up events at run time and put it into a low
1803 * power state.
1804 */
1805int pci_finish_runtime_suspend(struct pci_dev *dev)
1806{
1807 pci_power_t target_state = pci_target_state(dev);
1808 int error;
1809
1810 if (target_state == PCI_POWER_ERROR)
1811 return -EIO;
1812
1813 __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1814
1815 error = pci_set_power_state(dev, target_state);
1816
1817 if (error)
1818 __pci_enable_wake(dev, target_state, true, false);
1819
1820 return error;
1821}
1822
1823/**
Rafael J. Wysockib67ea762010-02-17 23:44:09 +01001824 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1825 * @dev: Device to check.
1826 *
1827 * Return true if the device itself is cabable of generating wake-up events
1828 * (through the platform or using the native PCIe PME) or if the device supports
1829 * PME and one of its upstream bridges can generate wake-up events.
1830 */
1831bool pci_dev_run_wake(struct pci_dev *dev)
1832{
1833 struct pci_bus *bus = dev->bus;
1834
1835 if (device_run_wake(&dev->dev))
1836 return true;
1837
1838 if (!dev->pme_support)
1839 return false;
1840
1841 while (bus->parent) {
1842 struct pci_dev *bridge = bus->self;
1843
1844 if (device_run_wake(&bridge->dev))
1845 return true;
1846
1847 bus = bus->parent;
1848 }
1849
1850 /* We have reached the root bus. */
1851 if (bus->bridge)
1852 return device_run_wake(bus->bridge);
1853
1854 return false;
1855}
1856EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1857
/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 *
 * Probes the PCI PM capability, records the supported low-power states and
 * PME# sources in @dev, and leaves PME# generation disabled.  Devices start
 * with runtime PM forbidden until user space or a driver allows it.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 pmc;

	pm_runtime_forbid(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	/* Versions above 3 are unknown; leave pm_cap unset in that case. */
	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;

	/* Record D1/D2 support unless globally disabled by a quirk. */
	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
				   dev->d1_support ? " D1" : "",
				   dev->d2_support ? " D2" : "");
	}

	/* Record the states PME# can be signaled from, if any. */
	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		dev_printk(KERN_DEBUG, &dev->dev,
			 "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	} else {
		dev->pme_support = 0;
	}
}
1925
/**
 * platform_pci_wakeup_init - init platform wakeup if present
 * @dev: PCI device
 *
 * Some devices don't have PCI PM caps but can still generate wakeup
 * events through platform methods (like ACPI events). If @dev supports
 * platform wakeup events, set the device flag to indicate as much. This
 * may be redundant if the device also supports PCI PM caps, but double
 * initialization should be safe in that case.
 */
void platform_pci_wakeup_init(struct pci_dev *dev)
{
	if (!platform_pci_can_wakeup(dev))
		return;

	device_set_wakeup_capable(&dev->dev, true);
	/* Start with platform wake-up signaling disabled. */
	platform_pci_sleep_wake(dev, false);
}
1944
/* Link a preallocated capability save area into @pci_dev's list, to be
 * filled in at suspend time and freed by pci_free_cap_save_buffers(). */
static void pci_add_saved_cap(struct pci_dev *pci_dev,
	struct pci_cap_saved_state *new_cap)
{
	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
}
1950
Jesse Barneseb9c39d2008-12-17 12:10:05 -08001951/**
Rafael J. Wysocki63f48982008-12-07 22:02:58 +01001952 * pci_add_save_buffer - allocate buffer for saving given capability registers
1953 * @dev: the PCI device
1954 * @cap: the capability to allocate the buffer for
1955 * @size: requested size of the buffer
1956 */
1957static int pci_add_cap_save_buffer(
1958 struct pci_dev *dev, char cap, unsigned int size)
1959{
1960 int pos;
1961 struct pci_cap_saved_state *save_state;
1962
1963 pos = pci_find_capability(dev, cap);
1964 if (pos <= 0)
1965 return 0;
1966
1967 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1968 if (!save_state)
1969 return -ENOMEM;
1970
Alex Williamson24a47422011-05-10 10:02:11 -06001971 save_state->cap.cap_nr = cap;
1972 save_state->cap.size = size;
Rafael J. Wysocki63f48982008-12-07 22:02:58 +01001973 pci_add_saved_cap(dev, save_state);
1974
1975 return 0;
1976}
1977
1978/**
1979 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
1980 * @dev: the PCI device
1981 */
1982void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1983{
1984 int error;
1985
Yu Zhao89858512009-02-16 02:55:47 +08001986 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1987 PCI_EXP_SAVE_REGS * sizeof(u16));
Rafael J. Wysocki63f48982008-12-07 22:02:58 +01001988 if (error)
1989 dev_err(&dev->dev,
1990 "unable to preallocate PCI Express save buffer\n");
1991
1992 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1993 if (error)
1994 dev_err(&dev->dev,
1995 "unable to preallocate PCI-X save buffer\n");
1996}
1997
/* Free every capability save buffer previously linked into @dev's list by
 * pci_add_saved_cap(). */
void pci_free_cap_save_buffers(struct pci_dev *dev)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *pos, *n;

	hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
		kfree(tmp);
}
2006
/**
 * pci_enable_ari - enable ARI forwarding if hardware support it
 * @dev: the PCI device
 *
 * ARI forwarding is enabled on the upstream bridge, not on @dev itself;
 * only function 0 (devfn == 0) triggers the check.
 */
void pci_enable_ari(struct pci_dev *dev)
{
	int pos;
	u32 cap;
	u16 ctrl;
	struct pci_dev *bridge;

	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
		return;

	/* The device itself must advertise the ARI extended capability. */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
	if (!pos)
		return;

	/* bridge is NULL for devices on a root bus; nothing to enable then. */
	bridge = dev->bus->self;
	if (!bridge)
		return;

	/* ARI is a PCIe cap v2 feature */
	pos = pci_pcie_cap2(bridge);
	if (!pos)
		return;

	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_ARI))
		return;

	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl |= PCI_EXP_DEVCTL2_ARI;
	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);

	bridge->ari_enabled = 1;
}
2044
/**
 * pci_enable_ido - enable ID-based Ordering on a device
 * @dev: the PCI device
 * @type: which types of IDO to enable
 *
 * Enable ID-based ordering on @dev. @type can contain the bits
 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
 * which types of transactions are allowed to be re-ordered.
 */
void pci_enable_ido(struct pci_dev *dev, unsigned long type)
{
	int pos;
	u16 ctrl;

	/* ID-based Ordering is a PCIe cap v2 feature */
	pos = pci_pcie_cap2(dev);
	if (!pos)
		return;

	/* Read-modify-write Device Control 2, setting only requested bits. */
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	if (type & PCI_EXP_IDO_REQUEST)
		ctrl |= PCI_EXP_IDO_REQ_EN;
	if (type & PCI_EXP_IDO_COMPLETION)
		ctrl |= PCI_EXP_IDO_CMP_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_enable_ido);
2072
/**
 * pci_disable_ido - disable ID-based ordering on a device
 * @dev: the PCI device
 * @type: which types of IDO to disable
 */
void pci_disable_ido(struct pci_dev *dev, unsigned long type)
{
	int pos;
	u16 ctrl;

	/* ID-based Ordering is a PCIe cap v2 feature */
	pos = pci_pcie_cap2(dev);
	if (!pos)
		return;

	/* Read-modify-write Device Control 2, clearing only requested bits. */
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	if (type & PCI_EXP_IDO_REQUEST)
		ctrl &= ~PCI_EXP_IDO_REQ_EN;
	if (type & PCI_EXP_IDO_COMPLETION)
		ctrl &= ~PCI_EXP_IDO_CMP_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_ido);
2096
Jesse Barnes48a92a82011-01-10 12:46:36 -08002097/**
2098 * pci_enable_obff - enable optimized buffer flush/fill
2099 * @dev: PCI device
2100 * @type: type of signaling to use
2101 *
2102 * Try to enable @type OBFF signaling on @dev. It will try using WAKE#
2103 * signaling if possible, falling back to message signaling only if
2104 * WAKE# isn't supported. @type should indicate whether the PCIe link
2105 * be brought out of L0s or L1 to send the message. It should be either
2106 * %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_OBFF_SIGNAL_L0.
2107 *
2108 * If your device can benefit from receiving all messages, even at the
2109 * power cost of bringing the link back up from a low power state, use
2110 * %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_OBFF_SIGNAL_L0 (the
2111 * preferred type).
2112 *
2113 * RETURNS:
2114 * Zero on success, appropriate error number on failure.
2115 */
2116int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2117{
2118 int pos;
2119 u32 cap;
2120 u16 ctrl;
2121 int ret;
2122
Myron Stowec463b8c2012-06-01 15:16:37 -06002123 /* OBFF is a PCIe cap v2 feature */
2124 pos = pci_pcie_cap2(dev);
Jesse Barnes48a92a82011-01-10 12:46:36 -08002125 if (!pos)
2126 return -ENOTSUPP;
2127
2128 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2129 if (!(cap & PCI_EXP_OBFF_MASK))
2130 return -ENOTSUPP; /* no OBFF support at all */
2131
2132 /* Make sure the topology supports OBFF as well */
2133 if (dev->bus) {
2134 ret = pci_enable_obff(dev->bus->self, type);
2135 if (ret)
2136 return ret;
2137 }
2138
2139 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2140 if (cap & PCI_EXP_OBFF_WAKE)
2141 ctrl |= PCI_EXP_OBFF_WAKE_EN;
2142 else {
2143 switch (type) {
2144 case PCI_EXP_OBFF_SIGNAL_L0:
2145 if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2146 ctrl |= PCI_EXP_OBFF_MSGA_EN;
2147 break;
2148 case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2149 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2150 ctrl |= PCI_EXP_OBFF_MSGB_EN;
2151 break;
2152 default:
2153 WARN(1, "bad OBFF signal type\n");
2154 return -ENOTSUPP;
2155 }
2156 }
2157 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2158
2159 return 0;
2160}
2161EXPORT_SYMBOL(pci_enable_obff);
2162
/**
 * pci_disable_obff - disable optimized buffer flush/fill
 * @dev: PCI device
 *
 * Disable OBFF on @dev.
 */
void pci_disable_obff(struct pci_dev *dev)
{
	int pos;
	u16 ctrl;

	/* OBFF is a PCIe cap v2 feature */
	pos = pci_pcie_cap2(dev);
	if (!pos)
		return;

	/* Only the WAKE# enable bit is cleared here; message-signaling
	 * enables (MSGA/MSGB) are left untouched. */
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_obff);
2184
Jesse Barnes51c2e0a2011-01-14 08:53:04 -08002185/**
2186 * pci_ltr_supported - check whether a device supports LTR
2187 * @dev: PCI device
2188 *
2189 * RETURNS:
2190 * True if @dev supports latency tolerance reporting, false otherwise.
2191 */
Myron Stowec32823f2012-06-01 15:16:25 -06002192static bool pci_ltr_supported(struct pci_dev *dev)
Jesse Barnes51c2e0a2011-01-14 08:53:04 -08002193{
2194 int pos;
2195 u32 cap;
2196
Myron Stowec463b8c2012-06-01 15:16:37 -06002197 /* LTR is a PCIe cap v2 feature */
2198 pos = pci_pcie_cap2(dev);
Jesse Barnes51c2e0a2011-01-14 08:53:04 -08002199 if (!pos)
2200 return false;
2201
2202 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2203
2204 return cap & PCI_EXP_DEVCAP2_LTR;
2205}
Jesse Barnes51c2e0a2011-01-14 08:53:04 -08002206
2207/**
2208 * pci_enable_ltr - enable latency tolerance reporting
2209 * @dev: PCI device
2210 *
2211 * Enable LTR on @dev if possible, which means enabling it first on
2212 * upstream ports.
2213 *
2214 * RETURNS:
2215 * Zero on success, errno on failure.
2216 */
2217int pci_enable_ltr(struct pci_dev *dev)
2218{
2219 int pos;
2220 u16 ctrl;
2221 int ret;
2222
2223 if (!pci_ltr_supported(dev))
2224 return -ENOTSUPP;
2225
Myron Stowec463b8c2012-06-01 15:16:37 -06002226 /* LTR is a PCIe cap v2 feature */
2227 pos = pci_pcie_cap2(dev);
Jesse Barnes51c2e0a2011-01-14 08:53:04 -08002228 if (!pos)
2229 return -ENOTSUPP;
2230
2231 /* Only primary function can enable/disable LTR */
2232 if (PCI_FUNC(dev->devfn) != 0)
2233 return -EINVAL;
2234
2235 /* Enable upstream ports first */
2236 if (dev->bus) {
2237 ret = pci_enable_ltr(dev->bus->self);
2238 if (ret)
2239 return ret;
2240 }
2241
2242 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2243 ctrl |= PCI_EXP_LTR_EN;
2244 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2245
2246 return 0;
2247}
2248EXPORT_SYMBOL(pci_enable_ltr);
2249
/**
 * pci_disable_ltr - disable latency tolerance reporting
 * @dev: PCI device
 */
void pci_disable_ltr(struct pci_dev *dev)
{
	int pos;
	u16 ctrl;

	if (!pci_ltr_supported(dev))
		return;

	/* LTR is a PCIe cap v2 feature */
	pos = pci_pcie_cap2(dev);
	if (!pos)
		return;

	/* Only primary function can enable/disable LTR */
	if (PCI_FUNC(dev->devfn) != 0)
		return;

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl &= ~PCI_EXP_LTR_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_ltr);
2276
/*
 * Reduce *val into the 10-bit LTR value range by repeatedly dividing by
 * 32 (rounding up); returns how many divisions were needed (the scale).
 */
static int __pci_ltr_scale(int *val)
{
	int scale;

	for (scale = 0; *val > 1023; scale++)
		*val = (*val + 31) / 32;

	return scale;
}
2287
2288/**
2289 * pci_set_ltr - set LTR latency values
2290 * @dev: PCI device
2291 * @snoop_lat_ns: snoop latency in nanoseconds
2292 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2293 *
2294 * Figure out the scale and set the LTR values accordingly.
2295 */
2296int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2297{
2298 int pos, ret, snoop_scale, nosnoop_scale;
2299 u16 val;
2300
2301 if (!pci_ltr_supported(dev))
2302 return -ENOTSUPP;
2303
2304 snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2305 nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2306
2307 if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2308 nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2309 return -EINVAL;
2310
2311 if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2312 (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2313 return -EINVAL;
2314
2315 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2316 if (!pos)
2317 return -ENOTSUPP;
2318
2319 val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2320 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2321 if (ret != 4)
2322 return -EIO;
2323
2324 val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2325 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2326 if (ret != 4)
2327 return -EIO;
2328
2329 return 0;
2330}
2331EXPORT_SYMBOL(pci_set_ltr);
2332
/* Set by pci_request_acs(); consulted by pci_enable_acs() at device setup. */
static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}
2342
Bjorn Helgaas57c2cf72008-12-11 11:24:23 -07002343/**
Allen Kayae21ee62009-10-07 10:27:17 -07002344 * pci_enable_acs - enable ACS if hardware support it
2345 * @dev: the PCI device
2346 */
2347void pci_enable_acs(struct pci_dev *dev)
2348{
2349 int pos;
2350 u16 cap;
2351 u16 ctrl;
2352
Chris Wright5d990b62009-12-04 12:15:21 -08002353 if (!pci_acs_enable)
2354 return;
2355
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09002356 if (!pci_is_pcie(dev))
Allen Kayae21ee62009-10-07 10:27:17 -07002357 return;
2358
2359 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2360 if (!pos)
2361 return;
2362
2363 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2364 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2365
2366 /* Source Validation */
2367 ctrl |= (cap & PCI_ACS_SV);
2368
2369 /* P2P Request Redirect */
2370 ctrl |= (cap & PCI_ACS_RR);
2371
2372 /* P2P Completion Redirect */
2373 ctrl |= (cap & PCI_ACS_CR);
2374
2375 /* Upstream Forwarding */
2376 ctrl |= (cap & PCI_ACS_UF);
2377
2378 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2379}
2380
2381/**
Bjorn Helgaas57c2cf72008-12-11 11:24:23 -07002382 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2383 * @dev: the PCI device
2384 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTD, 4=INTD)
2385 *
2386 * Perform INTx swizzling for a device behind one level of bridge. This is
2387 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
Matthew Wilcox46b952a2009-07-01 14:24:30 -07002388 * behind bridges on add-in cards. For devices with ARI enabled, the slot
2389 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2390 * the PCI Express Base Specification, Revision 2.1)
Bjorn Helgaas57c2cf72008-12-11 11:24:23 -07002391 */
John Crispin3df425f2012-04-12 17:33:07 +02002392u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
Bjorn Helgaas57c2cf72008-12-11 11:24:23 -07002393{
Matthew Wilcox46b952a2009-07-01 14:24:30 -07002394 int slot;
2395
2396 if (pci_ari_enabled(dev->bus))
2397 slot = 0;
2398 else
2399 slot = PCI_SLOT(dev->devfn);
2400
2401 return (((pin - 1) + slot) % 4) + 1;
Bjorn Helgaas57c2cf72008-12-11 11:24:23 -07002402}
2403
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404int
2405pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2406{
2407 u8 pin;
2408
Kristen Accardi514d2072005-11-02 16:24:39 -08002409 pin = dev->pin;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 if (!pin)
2411 return -1;
Bjorn Helgaas878f2e52008-12-09 16:11:46 -07002412
Kenji Kaneshige8784fd42009-05-26 16:07:33 +09002413 while (!pci_is_root_bus(dev->bus)) {
Bjorn Helgaas57c2cf72008-12-11 11:24:23 -07002414 pin = pci_swizzle_interrupt_pin(dev, pin);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415 dev = dev->bus->self;
2416 }
2417 *bridge = dev;
2418 return pin;
2419}
2420
2421/**
Bjorn Helgaas68feac82008-12-16 21:36:55 -07002422 * pci_common_swizzle - swizzle INTx all the way to root bridge
2423 * @dev: the PCI device
2424 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTD, 4=INTD)
2425 *
2426 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
2427 * bridges all the way up to a PCI root bus.
2428 */
2429u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2430{
2431 u8 pin = *pinp;
2432
Kenji Kaneshige1eb39482009-05-26 16:08:36 +09002433 while (!pci_is_root_bus(dev->bus)) {
Bjorn Helgaas68feac82008-12-16 21:36:55 -07002434 pin = pci_swizzle_interrupt_pin(dev, pin);
2435 dev = dev->bus->self;
2436 }
2437 *pinp = pin;
2438 return PCI_SLOT(dev->devfn);
2439}
2440
2441/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442 * pci_release_region - Release a PCI bar
2443 * @pdev: PCI device whose resources were previously reserved by pci_request_region
2444 * @bar: BAR to release
2445 *
2446 * Releases the PCI I/O and memory resources previously reserved by a
2447 * successful call to pci_request_region. Call this function only
2448 * after all use of the PCI regions has ceased.
2449 */
2450void pci_release_region(struct pci_dev *pdev, int bar)
2451{
Tejun Heo9ac78492007-01-20 16:00:26 +09002452 struct pci_devres *dr;
2453
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 if (pci_resource_len(pdev, bar) == 0)
2455 return;
2456 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2457 release_region(pci_resource_start(pdev, bar),
2458 pci_resource_len(pdev, bar));
2459 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2460 release_mem_region(pci_resource_start(pdev, bar),
2461 pci_resource_len(pdev, bar));
Tejun Heo9ac78492007-01-20 16:00:26 +09002462
2463 dr = find_pci_dr(pdev);
2464 if (dr)
2465 dr->region_mask &= ~(1 << bar);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466}
2467
2468/**
Randy Dunlapf5ddcac2009-01-09 17:03:20 -08002469 * __pci_request_region - Reserved PCI I/O and memory resource
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470 * @pdev: PCI device whose resources are to be reserved
2471 * @bar: BAR to be reserved
2472 * @res_name: Name to be associated with resource.
Randy Dunlapf5ddcac2009-01-09 17:03:20 -08002473 * @exclusive: whether the region access is exclusive or not
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474 *
2475 * Mark the PCI region associated with PCI device @pdev BR @bar as
2476 * being reserved by owner @res_name. Do not access any
2477 * address inside the PCI regions unless this call returns
2478 * successfully.
2479 *
Randy Dunlapf5ddcac2009-01-09 17:03:20 -08002480 * If @exclusive is set, then the region is marked so that userspace
2481 * is explicitly not allowed to map the resource via /dev/mem or
2482 * sysfs MMIO access.
2483 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484 * Returns 0 on success, or %EBUSY on error. A warning
2485 * message is also printed on failure.
2486 */
Arjan van de Vene8de1482008-10-22 19:55:31 -07002487static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2488 int exclusive)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002489{
Tejun Heo9ac78492007-01-20 16:00:26 +09002490 struct pci_devres *dr;
2491
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492 if (pci_resource_len(pdev, bar) == 0)
2493 return 0;
2494
2495 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2496 if (!request_region(pci_resource_start(pdev, bar),
2497 pci_resource_len(pdev, bar), res_name))
2498 goto err_out;
2499 }
2500 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
Arjan van de Vene8de1482008-10-22 19:55:31 -07002501 if (!__request_mem_region(pci_resource_start(pdev, bar),
2502 pci_resource_len(pdev, bar), res_name,
2503 exclusive))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504 goto err_out;
2505 }
Tejun Heo9ac78492007-01-20 16:00:26 +09002506
2507 dr = find_pci_dr(pdev);
2508 if (dr)
2509 dr->region_mask |= 1 << bar;
2510
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511 return 0;
2512
2513err_out:
Bjorn Helgaasc7dabef2009-10-27 13:26:47 -06002514 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
Benjamin Herrenschmidt096e6f62008-10-20 15:07:37 +11002515 &pdev->resource[bar]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516 return -EBUSY;
2517}
2518
Hidetoshi Setoc87deff2006-12-18 10:31:06 +09002519/**
Randy Dunlapf5ddcac2009-01-09 17:03:20 -08002520 * pci_request_region - Reserve PCI I/O and memory resource
Arjan van de Vene8de1482008-10-22 19:55:31 -07002521 * @pdev: PCI device whose resources are to be reserved
2522 * @bar: BAR to be reserved
Randy Dunlapf5ddcac2009-01-09 17:03:20 -08002523 * @res_name: Name to be associated with resource
Arjan van de Vene8de1482008-10-22 19:55:31 -07002524 *
Randy Dunlapf5ddcac2009-01-09 17:03:20 -08002525 * Mark the PCI region associated with PCI device @pdev BAR @bar as
Arjan van de Vene8de1482008-10-22 19:55:31 -07002526 * being reserved by owner @res_name. Do not access any
2527 * address inside the PCI regions unless this call returns
2528 * successfully.
2529 *
2530 * Returns 0 on success, or %EBUSY on error. A warning
2531 * message is also printed on failure.
2532 */
2533int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2534{
2535 return __pci_request_region(pdev, bar, res_name, 0);
2536}
2537
2538/**
2539 * pci_request_region_exclusive - Reserved PCI I/O and memory resource
2540 * @pdev: PCI device whose resources are to be reserved
2541 * @bar: BAR to be reserved
2542 * @res_name: Name to be associated with resource.
2543 *
2544 * Mark the PCI region associated with PCI device @pdev BR @bar as
2545 * being reserved by owner @res_name. Do not access any
2546 * address inside the PCI regions unless this call returns
2547 * successfully.
2548 *
2549 * Returns 0 on success, or %EBUSY on error. A warning
2550 * message is also printed on failure.
2551 *
2552 * The key difference that _exclusive makes it that userspace is
2553 * explicitly not allowed to map the resource via /dev/mem or
2554 * sysfs.
2555 */
2556int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2557{
2558 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2559}
2560/**
Hidetoshi Setoc87deff2006-12-18 10:31:06 +09002561 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2562 * @pdev: PCI device whose resources were previously reserved
2563 * @bars: Bitmask of BARs to be released
2564 *
2565 * Release selected PCI I/O and memory resources previously reserved.
2566 * Call this function only after all use of the PCI regions has ceased.
2567 */
2568void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2569{
2570 int i;
2571
2572 for (i = 0; i < 6; i++)
2573 if (bars & (1 << i))
2574 pci_release_region(pdev, i);
2575}
2576
Arjan van de Vene8de1482008-10-22 19:55:31 -07002577int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2578 const char *res_name, int excl)
Hidetoshi Setoc87deff2006-12-18 10:31:06 +09002579{
2580 int i;
2581
2582 for (i = 0; i < 6; i++)
2583 if (bars & (1 << i))
Arjan van de Vene8de1482008-10-22 19:55:31 -07002584 if (__pci_request_region(pdev, i, res_name, excl))
Hidetoshi Setoc87deff2006-12-18 10:31:06 +09002585 goto err_out;
2586 return 0;
2587
2588err_out:
2589 while(--i >= 0)
2590 if (bars & (1 << i))
2591 pci_release_region(pdev, i);
2592
2593 return -EBUSY;
2594}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595
Arjan van de Vene8de1482008-10-22 19:55:31 -07002596
2597/**
2598 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2599 * @pdev: PCI device whose resources are to be reserved
2600 * @bars: Bitmask of BARs to be requested
2601 * @res_name: Name to be associated with resource
2602 */
2603int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2604 const char *res_name)
2605{
2606 return __pci_request_selected_regions(pdev, bars, res_name, 0);
2607}
2608
2609int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2610 int bars, const char *res_name)
2611{
2612 return __pci_request_selected_regions(pdev, bars, res_name,
2613 IORESOURCE_EXCLUSIVE);
2614}
2615
Linus Torvalds1da177e2005-04-16 15:20:36 -07002616/**
2617 * pci_release_regions - Release reserved PCI I/O and memory resources
2618 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
2619 *
2620 * Releases all PCI I/O and memory resources previously reserved by a
2621 * successful call to pci_request_regions. Call this function only
2622 * after all use of the PCI regions has ceased.
2623 */
2624
2625void pci_release_regions(struct pci_dev *pdev)
2626{
Hidetoshi Setoc87deff2006-12-18 10:31:06 +09002627 pci_release_selected_regions(pdev, (1 << 6) - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628}
2629
2630/**
2631 * pci_request_regions - Reserved PCI I/O and memory resources
2632 * @pdev: PCI device whose resources are to be reserved
2633 * @res_name: Name to be associated with resource.
2634 *
2635 * Mark all PCI regions associated with PCI device @pdev as
2636 * being reserved by owner @res_name. Do not access any
2637 * address inside the PCI regions unless this call returns
2638 * successfully.
2639 *
2640 * Returns 0 on success, or %EBUSY on error. A warning
2641 * message is also printed on failure.
2642 */
Jeff Garzik3c990e92006-03-04 21:52:42 -05002643int pci_request_regions(struct pci_dev *pdev, const char *res_name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002644{
Hidetoshi Setoc87deff2006-12-18 10:31:06 +09002645 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646}
2647
2648/**
Arjan van de Vene8de1482008-10-22 19:55:31 -07002649 * pci_request_regions_exclusive - Reserved PCI I/O and memory resources
2650 * @pdev: PCI device whose resources are to be reserved
2651 * @res_name: Name to be associated with resource.
2652 *
2653 * Mark all PCI regions associated with PCI device @pdev as
2654 * being reserved by owner @res_name. Do not access any
2655 * address inside the PCI regions unless this call returns
2656 * successfully.
2657 *
2658 * pci_request_regions_exclusive() will mark the region so that
2659 * /dev/mem and the sysfs MMIO access will not be allowed.
2660 *
2661 * Returns 0 on success, or %EBUSY on error. A warning
2662 * message is also printed on failure.
2663 */
2664int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2665{
2666 return pci_request_selected_regions_exclusive(pdev,
2667 ((1 << 6) - 1), res_name);
2668}
2669
Ben Hutchings6a479072008-12-23 03:08:29 +00002670static void __pci_set_master(struct pci_dev *dev, bool enable)
2671{
2672 u16 old_cmd, cmd;
2673
2674 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2675 if (enable)
2676 cmd = old_cmd | PCI_COMMAND_MASTER;
2677 else
2678 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2679 if (cmd != old_cmd) {
2680 dev_dbg(&dev->dev, "%s bus mastering\n",
2681 enable ? "enabling" : "disabling");
2682 pci_write_config_word(dev, PCI_COMMAND, cmd);
2683 }
2684 dev->is_busmaster = enable;
2685}
Arjan van de Vene8de1482008-10-22 19:55:31 -07002686
2687/**
Myron Stowe96c55902011-10-28 15:48:38 -06002688 * pcibios_set_master - enable PCI bus-mastering for device dev
2689 * @dev: the PCI device to enable
2690 *
2691 * Enables PCI bus-mastering for the device. This is the default
2692 * implementation. Architecture specific implementations can override
2693 * this if necessary.
2694 */
2695void __weak pcibios_set_master(struct pci_dev *dev)
2696{
2697 u8 lat;
2698
Myron Stowef6766782011-10-28 15:49:20 -06002699 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2700 if (pci_is_pcie(dev))
2701 return;
2702
Myron Stowe96c55902011-10-28 15:48:38 -06002703 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2704 if (lat < 16)
2705 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2706 else if (lat > pcibios_max_latency)
2707 lat = pcibios_max_latency;
2708 else
2709 return;
2710 dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
2711 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2712}
2713
2714/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715 * pci_set_master - enables bus-mastering for device dev
2716 * @dev: the PCI device to enable
2717 *
2718 * Enables bus-mastering on the device and calls pcibios_set_master()
2719 * to do the needed arch specific settings.
2720 */
Ben Hutchings6a479072008-12-23 03:08:29 +00002721void pci_set_master(struct pci_dev *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722{
Ben Hutchings6a479072008-12-23 03:08:29 +00002723 __pci_set_master(dev, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724 pcibios_set_master(dev);
2725}
2726
Ben Hutchings6a479072008-12-23 03:08:29 +00002727/**
2728 * pci_clear_master - disables bus-mastering for device dev
2729 * @dev: the PCI device to disable
2730 */
2731void pci_clear_master(struct pci_dev *dev)
2732{
2733 __pci_set_master(dev, false);
2734}
2735
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736/**
Matthew Wilcoxedb2d972006-10-10 08:01:21 -06002737 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2738 * @dev: the PCI device for which MWI is to be enabled
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739 *
Matthew Wilcoxedb2d972006-10-10 08:01:21 -06002740 * Helper function for pci_set_mwi.
2741 * Originally copied from drivers/net/acenic.c.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002742 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2743 *
2744 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2745 */
Tejun Heo15ea76d2009-09-22 17:34:48 +09002746int pci_set_cacheline_size(struct pci_dev *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002747{
2748 u8 cacheline_size;
2749
2750 if (!pci_cache_line_size)
Tejun Heo15ea76d2009-09-22 17:34:48 +09002751 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752
2753 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2754 equal to or multiple of the right value. */
2755 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2756 if (cacheline_size >= pci_cache_line_size &&
2757 (cacheline_size % pci_cache_line_size) == 0)
2758 return 0;
2759
2760 /* Write the correct value. */
2761 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2762 /* Read it back. */
2763 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2764 if (cacheline_size == pci_cache_line_size)
2765 return 0;
2766
Bjorn Helgaas80ccba12008-06-13 10:52:11 -06002767 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2768 "supported\n", pci_cache_line_size << 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769
2770 return -EINVAL;
2771}
Tejun Heo15ea76d2009-09-22 17:34:48 +09002772EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2773
2774#ifdef PCI_DISABLE_MWI
/* MWI support compiled out (PCI_DISABLE_MWI): report success, touch nothing */
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}
2779
/* MWI support compiled out (PCI_DISABLE_MWI): report success, touch nothing */
int pci_try_set_mwi(struct pci_dev *dev)
{
	return 0;
}
2784
/* MWI support compiled out (PCI_DISABLE_MWI): nothing to clear */
void pci_clear_mwi(struct pci_dev *dev)
{
}
2788
2789#else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002790
2791/**
2792 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2793 * @dev: the PCI device for which MWI is enabled
2794 *
Randy Dunlap694625c2007-07-09 11:55:54 -07002795 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796 *
2797 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2798 */
2799int
2800pci_set_mwi(struct pci_dev *dev)
2801{
2802 int rc;
2803 u16 cmd;
2804
Matthew Wilcoxedb2d972006-10-10 08:01:21 -06002805 rc = pci_set_cacheline_size(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002806 if (rc)
2807 return rc;
2808
2809 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2810 if (! (cmd & PCI_COMMAND_INVALIDATE)) {
Bjorn Helgaas80ccba12008-06-13 10:52:11 -06002811 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812 cmd |= PCI_COMMAND_INVALIDATE;
2813 pci_write_config_word(dev, PCI_COMMAND, cmd);
2814 }
2815
2816 return 0;
2817}
2818
2819/**
Randy Dunlap694625c2007-07-09 11:55:54 -07002820 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2821 * @dev: the PCI device for which MWI is enabled
2822 *
2823 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2824 * Callers are not required to check the return value.
2825 *
2826 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2827 */
2828int pci_try_set_mwi(struct pci_dev *dev)
2829{
2830 int rc = pci_set_mwi(dev);
2831 return rc;
2832}
2833
2834/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2836 * @dev: the PCI device to disable
2837 *
2838 * Disables PCI Memory-Write-Invalidate transaction on the device
2839 */
2840void
2841pci_clear_mwi(struct pci_dev *dev)
2842{
2843 u16 cmd;
2844
2845 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2846 if (cmd & PCI_COMMAND_INVALIDATE) {
2847 cmd &= ~PCI_COMMAND_INVALIDATE;
2848 pci_write_config_word(dev, PCI_COMMAND, cmd);
2849 }
2850}
Matthew Wilcoxedb2d972006-10-10 08:01:21 -06002851#endif /* ! PCI_DISABLE_MWI */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852
Brett M Russa04ce0f2005-08-15 15:23:41 -04002853/**
2854 * pci_intx - enables/disables PCI INTx for device dev
Randy Dunlap8f7020d2005-10-23 11:57:38 -07002855 * @pdev: the PCI device to operate on
2856 * @enable: boolean: whether to enable or disable PCI INTx
Brett M Russa04ce0f2005-08-15 15:23:41 -04002857 *
2858 * Enables/disables PCI INTx for device dev
2859 */
2860void
2861pci_intx(struct pci_dev *pdev, int enable)
2862{
2863 u16 pci_command, new;
2864
2865 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2866
2867 if (enable) {
2868 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2869 } else {
2870 new = pci_command | PCI_COMMAND_INTX_DISABLE;
2871 }
2872
2873 if (new != pci_command) {
Tejun Heo9ac78492007-01-20 16:00:26 +09002874 struct pci_devres *dr;
2875
Brett M Russ2fd9d742005-09-09 10:02:22 -07002876 pci_write_config_word(pdev, PCI_COMMAND, new);
Tejun Heo9ac78492007-01-20 16:00:26 +09002877
2878 dr = find_pci_dr(pdev);
2879 if (dr && !dr->restore_intx) {
2880 dr->restore_intx = 1;
2881 dr->orig_intx = !enable;
2882 }
Brett M Russa04ce0f2005-08-15 15:23:41 -04002883 }
2884}
2885
Eric W. Biedermanf5f2b132007-03-05 00:30:07 -08002886/**
Jan Kiszkaa2e27782011-11-04 09:46:00 +01002887 * pci_intx_mask_supported - probe for INTx masking support
Randy Dunlap6e9292c2012-01-21 11:02:35 -08002888 * @dev: the PCI device to operate on
Jan Kiszkaa2e27782011-11-04 09:46:00 +01002889 *
2890 * Check if the device dev support INTx masking via the config space
2891 * command word.
2892 */
2893bool pci_intx_mask_supported(struct pci_dev *dev)
2894{
2895 bool mask_supported = false;
2896 u16 orig, new;
2897
2898 pci_cfg_access_lock(dev);
2899
2900 pci_read_config_word(dev, PCI_COMMAND, &orig);
2901 pci_write_config_word(dev, PCI_COMMAND,
2902 orig ^ PCI_COMMAND_INTX_DISABLE);
2903 pci_read_config_word(dev, PCI_COMMAND, &new);
2904
2905 /*
2906 * There's no way to protect against hardware bugs or detect them
2907 * reliably, but as long as we know what the value should be, let's
2908 * go ahead and check it.
2909 */
2910 if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
2911 dev_err(&dev->dev, "Command register changed from "
2912 "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
2913 } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
2914 mask_supported = true;
2915 pci_write_config_word(dev, PCI_COMMAND, orig);
2916 }
2917
2918 pci_cfg_access_unlock(dev);
2919 return mask_supported;
2920}
2921EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2922
/*
 * pci_check_and_set_intx_mask - conditionally flip the INTx disable bit
 * @dev: the PCI device to operate on
 * @mask: true to mask (set the disable bit), false to unmask (clear it)
 *
 * COMMAND and STATUS are read in one dword access under pci_lock, and the
 * disable bit is only changed when the interrupt-pending state matches the
 * requested direction.  Returns true if the mask was updated, false if the
 * pending state prevented the change.  Uses raw bus->ops accesses so the
 * whole read-check-write sequence stays inside the one spinlock hold.
 */
static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
{
	struct pci_bus *bus = dev->bus;
	bool mask_updated = true;
	u32 cmd_status_dword;
	u16 origcmd, newcmd;
	unsigned long flags;
	bool irq_pending;

	/*
	 * We do a single dword read to retrieve both command and status.
	 * Document assumptions that make this possible.
	 */
	BUILD_BUG_ON(PCI_COMMAND % 4);
	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);

	raw_spin_lock_irqsave(&pci_lock, flags);

	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);

	/* STATUS occupies the high 16 bits of the combined dword */
	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;

	/*
	 * Check interrupt status register to see whether our device
	 * triggered the interrupt (when masking) or the next IRQ is
	 * already pending (when unmasking).
	 */
	if (mask != irq_pending) {
		mask_updated = false;
		goto done;
	}

	/* low 16 bits are the COMMAND register */
	origcmd = cmd_status_dword;
	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
	if (mask)
		newcmd |= PCI_COMMAND_INTX_DISABLE;
	if (newcmd != origcmd)
		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);

done:
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return mask_updated;
}
2967
2968/**
2969 * pci_check_and_mask_intx - mask INTx on pending interrupt
Randy Dunlap6e9292c2012-01-21 11:02:35 -08002970 * @dev: the PCI device to operate on
Jan Kiszkaa2e27782011-11-04 09:46:00 +01002971 *
2972 * Check if the device dev has its INTx line asserted, mask it and
2973 * return true in that case. False is returned if not interrupt was
2974 * pending.
2975 */
2976bool pci_check_and_mask_intx(struct pci_dev *dev)
2977{
2978 return pci_check_and_set_intx_mask(dev, true);
2979}
2980EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
2981
2982/**
2983 * pci_check_and_mask_intx - unmask INTx of no interrupt is pending
Randy Dunlap6e9292c2012-01-21 11:02:35 -08002984 * @dev: the PCI device to operate on
Jan Kiszkaa2e27782011-11-04 09:46:00 +01002985 *
2986 * Check if the device dev has its INTx line asserted, unmask it if not
2987 * and return true. False is returned and the mask remains active if
2988 * there was still an interrupt pending.
2989 */
2990bool pci_check_and_unmask_intx(struct pci_dev *dev)
2991{
2992 return pci_check_and_set_intx_mask(dev, false);
2993}
2994EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
2995
2996/**
Eric W. Biedermanf5f2b132007-03-05 00:30:07 -08002997 * pci_msi_off - disables any msi or msix capabilities
Randy Dunlap8d7d86e2007-03-16 19:55:52 -07002998 * @dev: the PCI device to operate on
Eric W. Biedermanf5f2b132007-03-05 00:30:07 -08002999 *
3000 * If you want to use msi see pci_enable_msi and friends.
3001 * This is a lower level primitive that allows us to disable
3002 * msi operation at the device level.
3003 */
3004void pci_msi_off(struct pci_dev *dev)
3005{
3006 int pos;
3007 u16 control;
3008
3009 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
3010 if (pos) {
3011 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
3012 control &= ~PCI_MSI_FLAGS_ENABLE;
3013 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
3014 }
3015 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
3016 if (pos) {
3017 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
3018 control &= ~PCI_MSIX_FLAGS_ENABLE;
3019 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3020 }
3021}
Michael S. Tsirkinb03214d2010-06-23 22:49:06 -06003022EXPORT_SYMBOL_GPL(pci_msi_off);
Eric W. Biedermanf5f2b132007-03-05 00:30:07 -08003023
FUJITA Tomonori4d57cdf2008-02-04 22:27:55 -08003024int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3025{
3026 return dma_set_max_seg_size(&dev->dev, size);
3027}
3028EXPORT_SYMBOL(pci_set_dma_max_seg_size);
FUJITA Tomonori4d57cdf2008-02-04 22:27:55 -08003029
FUJITA Tomonori59fc67d2008-02-04 22:28:14 -08003030int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3031{
3032 return dma_set_seg_boundary(&dev->dev, mask);
3033}
3034EXPORT_SYMBOL(pci_set_dma_seg_boundary);
FUJITA Tomonori59fc67d2008-02-04 22:28:14 -08003035
Yu Zhao8c1c6992009-06-13 15:52:13 +08003036static int pcie_flr(struct pci_dev *dev, int probe)
Sheng Yang8dd7f802008-10-21 17:38:25 +08003037{
Yu Zhao8c1c6992009-06-13 15:52:13 +08003038 int i;
3039 int pos;
Sheng Yang8dd7f802008-10-21 17:38:25 +08003040 u32 cap;
Shmulik Ravid04b55c42009-12-03 22:27:51 +02003041 u16 status, control;
Sheng Yang8dd7f802008-10-21 17:38:25 +08003042
Kenji Kaneshige06a1cba2009-11-11 14:30:56 +09003043 pos = pci_pcie_cap(dev);
Yu Zhao8c1c6992009-06-13 15:52:13 +08003044 if (!pos)
Sheng Yang8dd7f802008-10-21 17:38:25 +08003045 return -ENOTTY;
Yu Zhao8c1c6992009-06-13 15:52:13 +08003046
3047 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
Sheng Yang8dd7f802008-10-21 17:38:25 +08003048 if (!(cap & PCI_EXP_DEVCAP_FLR))
3049 return -ENOTTY;
3050
Sheng Yangd91cdc72008-11-11 17:17:47 +08003051 if (probe)
3052 return 0;
3053
Sheng Yang8dd7f802008-10-21 17:38:25 +08003054 /* Wait for Transaction Pending bit clean */
Yu Zhao8c1c6992009-06-13 15:52:13 +08003055 for (i = 0; i < 4; i++) {
3056 if (i)
3057 msleep((1 << (i - 1)) * 100);
Sheng Yang5fe5db02009-02-09 14:53:47 +08003058
Yu Zhao8c1c6992009-06-13 15:52:13 +08003059 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
3060 if (!(status & PCI_EXP_DEVSTA_TRPND))
3061 goto clear;
3062 }
Sheng Yang8dd7f802008-10-21 17:38:25 +08003063
Yu Zhao8c1c6992009-06-13 15:52:13 +08003064 dev_err(&dev->dev, "transaction is not cleared; "
3065 "proceeding with reset anyway\n");
Sheng Yang5fe5db02009-02-09 14:53:47 +08003066
Yu Zhao8c1c6992009-06-13 15:52:13 +08003067clear:
Shmulik Ravid04b55c42009-12-03 22:27:51 +02003068 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
3069 control |= PCI_EXP_DEVCTL_BCR_FLR;
3070 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
3071
Yu Zhao8c1c6992009-06-13 15:52:13 +08003072 msleep(100);
Sheng Yang8dd7f802008-10-21 17:38:25 +08003073
Sheng Yang8dd7f802008-10-21 17:38:25 +08003074 return 0;
3075}
Sheng Yangd91cdc72008-11-11 17:17:47 +08003076
Yu Zhao8c1c6992009-06-13 15:52:13 +08003077static int pci_af_flr(struct pci_dev *dev, int probe)
Sheng Yang1ca88792008-11-11 17:17:48 +08003078{
Yu Zhao8c1c6992009-06-13 15:52:13 +08003079 int i;
3080 int pos;
Sheng Yang1ca88792008-11-11 17:17:48 +08003081 u8 cap;
Yu Zhao8c1c6992009-06-13 15:52:13 +08003082 u8 status;
Sheng Yang1ca88792008-11-11 17:17:48 +08003083
Yu Zhao8c1c6992009-06-13 15:52:13 +08003084 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3085 if (!pos)
Sheng Yang1ca88792008-11-11 17:17:48 +08003086 return -ENOTTY;
Yu Zhao8c1c6992009-06-13 15:52:13 +08003087
3088 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
Sheng Yang1ca88792008-11-11 17:17:48 +08003089 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3090 return -ENOTTY;
3091
3092 if (probe)
3093 return 0;
3094
Sheng Yang1ca88792008-11-11 17:17:48 +08003095 /* Wait for Transaction Pending bit clean */
Yu Zhao8c1c6992009-06-13 15:52:13 +08003096 for (i = 0; i < 4; i++) {
3097 if (i)
3098 msleep((1 << (i - 1)) * 100);
Sheng Yang5fe5db02009-02-09 14:53:47 +08003099
Yu Zhao8c1c6992009-06-13 15:52:13 +08003100 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
3101 if (!(status & PCI_AF_STATUS_TP))
3102 goto clear;
3103 }
3104
3105 dev_err(&dev->dev, "transaction is not cleared; "
3106 "proceeding with reset anyway\n");
3107
3108clear:
3109 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
Sheng Yang1ca88792008-11-11 17:17:48 +08003110 msleep(100);
Sheng Yang5fe5db02009-02-09 14:53:47 +08003111
Sheng Yang1ca88792008-11-11 17:17:48 +08003112 return 0;
3113}
3114
Rafael J. Wysocki83d74e02011-03-05 21:48:44 +01003115/**
3116 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3117 * @dev: Device to reset.
3118 * @probe: If set, only check if the device can be reset this way.
3119 *
3120 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3121 * unset, it will be reinitialized internally when going from PCI_D3hot to
3122 * PCI_D0. If that's the case and the device is not in a low-power state
3123 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3124 *
3125 * NOTE: This causes the caller to sleep for twice the device power transition
3126 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3127 * by devault (i.e. unless the @dev's d3_delay field has a different value).
3128 * Moreover, only devices in D0 can be reset by this function.
3129 */
Yu Zhaof85876b2009-06-13 15:52:14 +08003130static int pci_pm_reset(struct pci_dev *dev, int probe)
Sheng Yangd91cdc72008-11-11 17:17:47 +08003131{
Yu Zhaof85876b2009-06-13 15:52:14 +08003132 u16 csr;
Sheng Yangd91cdc72008-11-11 17:17:47 +08003133
Yu Zhaof85876b2009-06-13 15:52:14 +08003134 if (!dev->pm_cap)
3135 return -ENOTTY;
Sheng Yangd91cdc72008-11-11 17:17:47 +08003136
Yu Zhaof85876b2009-06-13 15:52:14 +08003137 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3138 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3139 return -ENOTTY;
Sheng Yang1ca88792008-11-11 17:17:48 +08003140
Yu Zhaof85876b2009-06-13 15:52:14 +08003141 if (probe)
3142 return 0;
3143
3144 if (dev->current_state != PCI_D0)
3145 return -EINVAL;
3146
3147 csr &= ~PCI_PM_CTRL_STATE_MASK;
3148 csr |= PCI_D3hot;
3149 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
Rafael J. Wysocki1ae861e2009-12-31 12:15:54 +01003150 pci_dev_d3_sleep(dev);
Yu Zhaof85876b2009-06-13 15:52:14 +08003151
3152 csr &= ~PCI_PM_CTRL_STATE_MASK;
3153 csr |= PCI_D0;
3154 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
Rafael J. Wysocki1ae861e2009-12-31 12:15:54 +01003155 pci_dev_d3_sleep(dev);
Yu Zhaof85876b2009-06-13 15:52:14 +08003156
3157 return 0;
3158}
3159
Yu Zhaoc12ff1d2009-06-13 15:52:15 +08003160static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3161{
3162 u16 ctrl;
3163 struct pci_dev *pdev;
3164
Yu Zhao654b75e2009-06-26 14:04:46 +08003165 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
Yu Zhaoc12ff1d2009-06-13 15:52:15 +08003166 return -ENOTTY;
3167
3168 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3169 if (pdev != dev)
3170 return -ENOTTY;
3171
3172 if (probe)
3173 return 0;
3174
3175 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
3176 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3177 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3178 msleep(100);
3179
3180 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3181 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3182 msleep(100);
3183
3184 return 0;
3185}
3186
Konrad Rzeszutek Wilk977f8572012-04-24 13:15:18 -06003187static int __pci_dev_reset(struct pci_dev *dev, int probe)
Sheng Yang8dd7f802008-10-21 17:38:25 +08003188{
Yu Zhao8c1c6992009-06-13 15:52:13 +08003189 int rc;
Sheng Yang8dd7f802008-10-21 17:38:25 +08003190
Yu Zhao8c1c6992009-06-13 15:52:13 +08003191 might_sleep();
Sheng Yang8dd7f802008-10-21 17:38:25 +08003192
Dexuan Cuib9c3b262009-12-07 13:03:21 +08003193 rc = pci_dev_specific_reset(dev, probe);
3194 if (rc != -ENOTTY)
3195 goto done;
3196
Yu Zhao8c1c6992009-06-13 15:52:13 +08003197 rc = pcie_flr(dev, probe);
3198 if (rc != -ENOTTY)
3199 goto done;
3200
3201 rc = pci_af_flr(dev, probe);
Yu Zhaof85876b2009-06-13 15:52:14 +08003202 if (rc != -ENOTTY)
3203 goto done;
3204
3205 rc = pci_pm_reset(dev, probe);
Yu Zhaoc12ff1d2009-06-13 15:52:15 +08003206 if (rc != -ENOTTY)
3207 goto done;
3208
3209 rc = pci_parent_bus_reset(dev, probe);
Yu Zhao8c1c6992009-06-13 15:52:13 +08003210done:
Konrad Rzeszutek Wilk977f8572012-04-24 13:15:18 -06003211 return rc;
3212}
3213
/*
 * pci_dev_reset - reset @dev, serializing against config access and the
 * driver core unless @probe is set (probe-only calls take no locks).
 * Lock order: pci_cfg_access_lock() before device_lock(); unlock in the
 * reverse order below.
 */
static int pci_dev_reset(struct pci_dev *dev, int probe)
{
	int rc;

	if (!probe) {
		pci_cfg_access_lock(dev);
		/* block PM suspend, driver probe, etc. */
		device_lock(&dev->dev);
	}

	rc = __pci_dev_reset(dev, probe);

	if (!probe) {
		device_unlock(&dev->dev);
		pci_cfg_access_unlock(dev);
	}
	return rc;
}
/**
 * __pci_reset_function - reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int __pci_reset_function(struct pci_dev *dev)
{
	/* probe == 0: perform the reset; pci_dev_reset() takes the
	 * config-access and device locks itself. */
	return pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function);
Sheng Yang8dd7f802008-10-21 17:38:25 +08003254
/**
 * __pci_reset_function_locked - reset a PCI device function while holding
 * the @dev mutex lock.
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused and the caller is holding
 * the device mutex lock when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int __pci_reset_function_locked(struct pci_dev *dev)
{
	/* Caller already holds the device lock, so bypass pci_dev_reset()'s
	 * locking and invoke the reset methods directly. */
	return __pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3279
/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * Returns 0 if the device function can be reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_probe_reset_function(struct pci_dev *dev)
{
	/* probe == 1: only report whether a reset method exists;
	 * the device is not actually touched. */
	return pci_dev_reset(dev, 1);
}
3295
3296/**
Yu Zhao8c1c6992009-06-13 15:52:13 +08003297 * pci_reset_function - quiesce and reset a PCI device function
3298 * @dev: PCI device to reset
Sheng Yang8dd7f802008-10-21 17:38:25 +08003299 *
3300 * Some devices allow an individual function to be reset without affecting
3301 * other functions in the same device. The PCI device must be responsive
3302 * to PCI config space in order to use this function.
3303 *
3304 * This function does not just reset the PCI portion of a device, but
3305 * clears all the state associated with the device. This function differs
Yu Zhao8c1c6992009-06-13 15:52:13 +08003306 * from __pci_reset_function in that it saves and restores device state
Sheng Yang8dd7f802008-10-21 17:38:25 +08003307 * over the reset.
3308 *
Yu Zhao8c1c6992009-06-13 15:52:13 +08003309 * Returns 0 if the device function was successfully reset or negative if the
Sheng Yang8dd7f802008-10-21 17:38:25 +08003310 * device doesn't support resetting a single function.
3311 */
3312int pci_reset_function(struct pci_dev *dev)
3313{
Yu Zhao8c1c6992009-06-13 15:52:13 +08003314 int rc;
Sheng Yang8dd7f802008-10-21 17:38:25 +08003315
Yu Zhao8c1c6992009-06-13 15:52:13 +08003316 rc = pci_dev_reset(dev, 1);
3317 if (rc)
3318 return rc;
Sheng Yang8dd7f802008-10-21 17:38:25 +08003319
Sheng Yang8dd7f802008-10-21 17:38:25 +08003320 pci_save_state(dev);
3321
Yu Zhao8c1c6992009-06-13 15:52:13 +08003322 /*
3323 * both INTx and MSI are disabled after the Interrupt Disable bit
3324 * is set and the Bus Master bit is cleared.
3325 */
Sheng Yang8dd7f802008-10-21 17:38:25 +08003326 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3327
Yu Zhao8c1c6992009-06-13 15:52:13 +08003328 rc = pci_dev_reset(dev, 0);
Sheng Yang8dd7f802008-10-21 17:38:25 +08003329
3330 pci_restore_state(dev);
Sheng Yang8dd7f802008-10-21 17:38:25 +08003331
Yu Zhao8c1c6992009-06-13 15:52:13 +08003332 return rc;
Sheng Yang8dd7f802008-10-21 17:38:25 +08003333}
3334EXPORT_SYMBOL_GPL(pci_reset_function);
3335
3336/**
Peter Orubad556ad42007-05-15 13:59:13 +02003337 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3338 * @dev: PCI device to query
3339 *
3340 * Returns mmrbc: maximum designed memory read count in bytes
3341 * or appropriate error value.
3342 */
3343int pcix_get_max_mmrbc(struct pci_dev *dev)
3344{
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003345 int cap;
Peter Orubad556ad42007-05-15 13:59:13 +02003346 u32 stat;
3347
3348 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3349 if (!cap)
3350 return -EINVAL;
3351
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003352 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
Peter Orubad556ad42007-05-15 13:59:13 +02003353 return -EINVAL;
3354
Dean Nelson25daeb52010-03-09 22:26:40 -05003355 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
Peter Orubad556ad42007-05-15 13:59:13 +02003356}
3357EXPORT_SYMBOL(pcix_get_max_mmrbc);
3358
3359/**
3360 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3361 * @dev: PCI device to query
3362 *
3363 * Returns mmrbc: maximum memory read count in bytes
3364 * or appropriate error value.
3365 */
3366int pcix_get_mmrbc(struct pci_dev *dev)
3367{
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003368 int cap;
Dean Nelsonbdc2bda2010-03-09 22:26:48 -05003369 u16 cmd;
Peter Orubad556ad42007-05-15 13:59:13 +02003370
3371 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3372 if (!cap)
3373 return -EINVAL;
3374
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003375 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3376 return -EINVAL;
Peter Orubad556ad42007-05-15 13:59:13 +02003377
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003378 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
Peter Orubad556ad42007-05-15 13:59:13 +02003379}
3380EXPORT_SYMBOL(pcix_get_mmrbc);
3381
/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count, some bridges have erratas
 * that prevent this.
 */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
	int cap;
	u32 stat, v, o;
	u16 cmd;

	/* Only exact powers of two in [512, 4096] are encodable. */
	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
		return -EINVAL;

	/* Encode byte count as the 2-bit field value: 512 -> 0 ... 4096 -> 3. */
	v = ffs(mmrbc) - 10;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	/* Refuse values above the device's designed maximum. */
	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
		return -E2BIG;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
	if (o != v) {
		/* Some buses forbid increasing mmrbc (bridge errata). */
		if (v > o && dev->bus &&
		   (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
			return -EIO;

		cmd &= ~PCI_X_CMD_MAX_READ;
		cmd |= v << 2;
		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);
3429
3430/**
3431 * pcie_get_readrq - get PCI Express read request size
3432 * @dev: PCI device to query
3433 *
3434 * Returns maximum memory read request in bytes
3435 * or appropriate error value.
3436 */
3437int pcie_get_readrq(struct pci_dev *dev)
3438{
3439 int ret, cap;
3440 u16 ctl;
3441
Kenji Kaneshige06a1cba2009-11-11 14:30:56 +09003442 cap = pci_pcie_cap(dev);
Peter Orubad556ad42007-05-15 13:59:13 +02003443 if (!cap)
3444 return -EINVAL;
3445
3446 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3447 if (!ret)
Julia Lawall93e75fa2010-08-05 22:23:16 +02003448 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
Peter Orubad556ad42007-05-15 13:59:13 +02003449
3450 return ret;
3451}
3452EXPORT_SYMBOL(pcie_get_readrq);
3453
3454/**
3455 * pcie_set_readrq - set PCI Express maximum memory read request
3456 * @dev: PCI device to query
Randy Dunlap42e61f42007-07-23 21:42:11 -07003457 * @rq: maximum memory read count in bytes
Peter Orubad556ad42007-05-15 13:59:13 +02003458 * valid values are 128, 256, 512, 1024, 2048, 4096
3459 *
Jon Masonc9b378c2011-06-28 18:26:25 -05003460 * If possible sets maximum memory read request in bytes
Peter Orubad556ad42007-05-15 13:59:13 +02003461 */
3462int pcie_set_readrq(struct pci_dev *dev, int rq)
3463{
3464 int cap, err = -EINVAL;
3465 u16 ctl, v;
3466
vignesh babu229f5af2007-08-13 18:23:14 +05303467 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
Peter Orubad556ad42007-05-15 13:59:13 +02003468 goto out;
3469
Kenji Kaneshige06a1cba2009-11-11 14:30:56 +09003470 cap = pci_pcie_cap(dev);
Peter Orubad556ad42007-05-15 13:59:13 +02003471 if (!cap)
3472 goto out;
3473
3474 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3475 if (err)
3476 goto out;
Benjamin Herrenschmidta1c473a2011-10-14 14:56:15 -05003477 /*
3478 * If using the "performance" PCIe config, we clamp the
3479 * read rq size to the max packet size to prevent the
3480 * host bridge generating requests larger than we can
3481 * cope with
3482 */
3483 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3484 int mps = pcie_get_mps(dev);
3485
3486 if (mps < 0)
3487 return mps;
3488 if (mps < rq)
3489 rq = mps;
3490 }
3491
3492 v = (ffs(rq) - 8) << 12;
Peter Orubad556ad42007-05-15 13:59:13 +02003493
3494 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
3495 ctl &= ~PCI_EXP_DEVCTL_READRQ;
3496 ctl |= v;
Jon Masonc9b378c2011-06-28 18:26:25 -05003497 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
Peter Orubad556ad42007-05-15 13:59:13 +02003498 }
3499
3500out:
3501 return err;
3502}
3503EXPORT_SYMBOL(pcie_set_readrq);
3504
3505/**
Jon Masonb03e7492011-07-20 15:20:54 -05003506 * pcie_get_mps - get PCI Express maximum payload size
3507 * @dev: PCI device to query
3508 *
3509 * Returns maximum payload size in bytes
3510 * or appropriate error value.
3511 */
3512int pcie_get_mps(struct pci_dev *dev)
3513{
3514 int ret, cap;
3515 u16 ctl;
3516
3517 cap = pci_pcie_cap(dev);
3518 if (!cap)
3519 return -EINVAL;
3520
3521 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3522 if (!ret)
3523 ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3524
3525 return ret;
3526}
3527
3528/**
3529 * pcie_set_mps - set PCI Express maximum payload size
3530 * @dev: PCI device to query
Randy Dunlap47c08f32011-08-20 11:49:43 -07003531 * @mps: maximum payload size in bytes
Jon Masonb03e7492011-07-20 15:20:54 -05003532 * valid values are 128, 256, 512, 1024, 2048, 4096
3533 *
3534 * If possible sets maximum payload size
3535 */
3536int pcie_set_mps(struct pci_dev *dev, int mps)
3537{
3538 int cap, err = -EINVAL;
3539 u16 ctl, v;
3540
3541 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3542 goto out;
3543
3544 v = ffs(mps) - 8;
3545 if (v > dev->pcie_mpss)
3546 goto out;
3547 v <<= 5;
3548
3549 cap = pci_pcie_cap(dev);
3550 if (!cap)
3551 goto out;
3552
3553 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3554 if (err)
3555 goto out;
3556
3557 if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3558 ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3559 ctl |= v;
3560 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3561 }
3562out:
3563 return err;
3564}
3565
3566/**
Hidetoshi Setoc87deff2006-12-18 10:31:06 +09003567 * pci_select_bars - Make BAR mask from the type of resource
Randy Dunlapf95d8822007-02-10 14:41:56 -08003568 * @dev: the PCI device for which BAR mask is made
Hidetoshi Setoc87deff2006-12-18 10:31:06 +09003569 * @flags: resource type mask to be selected
3570 *
3571 * This helper routine makes bar mask from the type of resource.
3572 */
3573int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3574{
3575 int i, bars = 0;
3576 for (i = 0; i < PCI_NUM_RESOURCES; i++)
3577 if (pci_resource_flags(dev, i) & flags)
3578 bars |= (1 << i);
3579 return bars;
3580}
3581
/**
 * pci_resource_bar - get position of the BAR associated with a resource
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns BAR position in config space, or 0 if the BAR is invalid.
 */
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
{
	int reg;

	if (resno < PCI_ROM_RESOURCE) {
		/* Standard BARs: 4 bytes each, starting at BAR 0. */
		*type = pci_bar_unknown;
		return PCI_BASE_ADDRESS_0 + 4 * resno;
	} else if (resno == PCI_ROM_RESOURCE) {
		/* Expansion ROM BAR: register offset is cached per-device. */
		*type = pci_bar_mem32;
		return dev->rom_base_reg;
	} else if (resno < PCI_BRIDGE_RESOURCES) {
		/* device specific resource */
		reg = pci_iov_resource_bar(dev, resno, type);
		if (reg)
			return reg;
	}

	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
	return 0;
}
3610
/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

/* Install the arch hook called from pci_set_vga_state(); init-time only. */
void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables */
}
3618
3619static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
Dave Airlie7ad35cf2011-05-25 14:00:49 +10003620 unsigned int command_bits, u32 flags)
Mike Travis95a8b6e2010-02-02 14:38:13 -08003621{
3622 if (arch_set_vga_state)
3623 return arch_set_vga_state(dev, decode, command_bits,
Dave Airlie7ad35cf2011-05-25 14:00:49 +10003624 flags);
Mike Travis95a8b6e2010-02-02 14:38:13 -08003625 return 0;
3626}
3627
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003628/**
3629 * pci_set_vga_state - set VGA decode state on device and parents if requested
Randy Dunlap19eea632009-09-17 15:28:22 -07003630 * @dev: the PCI device
3631 * @decode: true = enable decoding, false = disable decoding
3632 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
Randy Dunlap3f37d622011-05-25 19:21:25 -07003633 * @flags: traverse ancestors and change bridges
Dave Airlie3448a192010-06-01 15:32:24 +10003634 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003635 */
3636int pci_set_vga_state(struct pci_dev *dev, bool decode,
Dave Airlie3448a192010-06-01 15:32:24 +10003637 unsigned int command_bits, u32 flags)
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003638{
3639 struct pci_bus *bus;
3640 struct pci_dev *bridge;
3641 u16 cmd;
Mike Travis95a8b6e2010-02-02 14:38:13 -08003642 int rc;
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003643
Dave Airlie3448a192010-06-01 15:32:24 +10003644 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003645
Mike Travis95a8b6e2010-02-02 14:38:13 -08003646 /* ARCH specific VGA enables */
Dave Airlie3448a192010-06-01 15:32:24 +10003647 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
Mike Travis95a8b6e2010-02-02 14:38:13 -08003648 if (rc)
3649 return rc;
3650
Dave Airlie3448a192010-06-01 15:32:24 +10003651 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3652 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3653 if (decode == true)
3654 cmd |= command_bits;
3655 else
3656 cmd &= ~command_bits;
3657 pci_write_config_word(dev, PCI_COMMAND, cmd);
3658 }
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003659
Dave Airlie3448a192010-06-01 15:32:24 +10003660 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003661 return 0;
3662
3663 bus = dev->bus;
3664 while (bus) {
3665 bridge = bus->self;
3666 if (bridge) {
3667 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3668 &cmd);
3669 if (decode == true)
3670 cmd |= PCI_BRIDGE_CTL_VGA;
3671 else
3672 cmd &= ~PCI_BRIDGE_CTL_VGA;
3673 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3674 cmd);
3675 }
3676 bus = bus->parent;
3677 }
3678 return 0;
3679}
3680
/* Value of the "pci=resource_alignment=" boot parameter, bounded by the
 * kernel command-line size; all access is guarded by the spinlock below. */
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);
Yuji Shimada32a9a682009-03-16 17:13:39 +09003684
/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
	int seg, bus, slot, func, align_order, count;
	resource_size_t align = 0;
	char *p;

	/* Parse entries of the form "[<order>@][<seg>:]<bus>:<slot>.<func>",
	 * separated by ';' or ',', until @dev matches or the string ends. */
	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	while (*p) {
		count = 0;
		/* Optional alignment order precedes an '@' sign. */
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
			p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;	/* -1 means "use PAGE_SIZE" */
		}
		/* Try the domain-qualified form first, then bus:slot.func. */
		if (sscanf(p, "%x:%x:%x.%x%n",
			&seg, &bus, &slot, &func, &count) != 4) {
			seg = 0;
			if (sscanf(p, "%x:%x.%x%n",
					&bus, &slot, &func, &count) != 3) {
				/* Invalid format */
				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
					p);
				break;
			}
		}
		p += count;
		if (seg == pci_domain_nr(dev->bus) &&
			bus == dev->bus->number &&
			slot == PCI_SLOT(dev->devfn) &&
			func == PCI_FUNC(dev->devfn)) {
			if (align_order == -1) {
				align = PAGE_SIZE;
			} else {
				align = 1 << align_order;
			}
			/* Found */
			break;
		}
		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
	spin_unlock(&resource_alignment_lock);
	return align;
}
3741
/**
 * pci_is_reassigndev - check if specified PCI is target device to reassign
 * @dev: the PCI device to check
 *
 * RETURNS: non-zero for PCI device is a target device to reassign,
 *          or zero is not.
 */
int pci_is_reassigndev(struct pci_dev *dev)
{
	/* A device is a reassignment target iff the user specified a
	 * non-zero alignment for it on the command line. */
	return pci_specified_resource_alignment(dev) != 0;
}
3753
/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align, size;
	u16 command;

	if (!pci_is_reassigndev(dev))
		return;

	/* Host-bridge resources cannot be reassigned; refuse. */
	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		dev_warn(&dev->dev,
			"Can't reassign resources to host bridge.\n");
		return;
	}

	/* Turn off memory decoding while the resources are in flux. */
	dev_info(&dev->dev,
		"Disabling memory decoding and releasing memory resources.\n");
	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	/* Release every MEM BAR: zero its start and grow it to the
	 * requested alignment so reassignment lands on that boundary. */
	align = pci_specified_resource_alignment(dev);
	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
		r = &dev->resource[i];
		if (!(r->flags & IORESOURCE_MEM))
			continue;
		size = resource_size(r);
		if (size < align) {
			size = align;
			dev_info(&dev->dev,
				"Rounding up size of resource #%d to %#llx.\n",
				i, (unsigned long long)size);
		}
		r->end = size - 1;
		r->start = 0;
	}
	/* Need to disable bridge's resource window,
	 * to enable the kernel to reassign new resource
	 * window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}
3815
/* Replace the resource_alignment parameter with the first @count bytes of
 * @buf (truncated to the buffer size), NUL-terminated, under the lock.
 * Returns the number of bytes actually stored. */
ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
{
	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
	spin_lock(&resource_alignment_lock);
	strncpy(resource_alignment_param, buf, count);
	resource_alignment_param[count] = '\0';
	spin_unlock(&resource_alignment_lock);
	return count;
}
3826
/* Copy the current resource_alignment parameter into @buf (at most @size
 * bytes, NUL-terminated); returns the length snprintf() reports. */
ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
{
	size_t count;
	spin_lock(&resource_alignment_lock);
	count = snprintf(buf, size, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);
	return count;
}
3835
/* sysfs "show" for /sys/bus/pci/resource_alignment. */
static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}
3840
/* sysfs "store" for /sys/bus/pci/resource_alignment. */
static ssize_t pci_resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	return pci_set_resource_alignment_param(buf, count);
}
3846
/* Declare the bus attribute backing /sys/bus/pci/resource_alignment. */
BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
	pci_resource_alignment_store);

/* Create the sysfs file; run as a late_initcall so the PCI bus type
 * is available by the time the attribute is registered. */
static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
					&bus_attr_resource_alignment);
}

late_initcall(pci_resource_alignment_sysfs_init);
3857
/* Disable PCI domain support; invoked for the "pci=nodomains" option. */
static void __devinit pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}
3864
/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 * @dev: The PCI device of the root bridge.
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
{
	return 1;
}
3877
/* Weak default CardBus fixup hook; architectures may override it. */
void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);
3882
/*
 * Parse the "pci=" early kernel parameter.  Options are comma-separated;
 * each one is first offered to pcibios_setup() and only matched here if
 * the arch code did not consume it.
 */
static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strncmp(str, "realloc=", 8)) {
				/* "realloc=" must be matched before the
				 * bare "realloc" prefix below. */
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
					strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003935
/* Exported entry points: device enable/disable, capability lookup,
 * region request/release, bus mastering and MWI control. */
EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

/* Power-management related exports. */
EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);