2 * Low-Level PCI Access for i386 machines
4 * Copyright 1993, 1994 Drew Eckhardt
6 * (Unix and Linux consulting and custom programming)
10 * Drew's work was sponsored by:
11 * iX Multiuser Multitasking Magazine
15 * Copyright 1997--2000 Martin Mares <mj@ucw.cz>
17 * For more information, please consult the following manuals (look at
18 * http://www.pcisig.com/ for how to get them):
20 * PCI BIOS Specification
21 * PCI Local Bus Specification
22 * PCI to PCI Bridge Specification
23 * PCI System Design Guide
27 #include <linux/types.h>
28 #include <linux/kernel.h>
29 #include <linux/pci.h>
30 #include <linux/init.h>
31 #include <linux/ioport.h>
32 #include <linux/errno.h>
33 #include <linux/bootmem.h>
34 #include <linux/acpi.h>
38 #include <asm/io_apic.h>
/*
 * Decide whether ISA-style (1K) alignment of an I/O resource can be
 * skipped for @dev: requires both the global PCI_CAN_SKIP_ISA_ALIGN
 * probe flag and an upstream bridge that does not forward ISA cycles
 * (PCI_BRIDGE_CTL_ISA clear).
 * NOTE(review): interior lines are missing from this view (numbering
 * jumps 44 -> 46); the actual return statements are not visible here.
 */
44 skip_isa_ioresource_align(struct pci_dev *dev) {
46 if ((pci_probe & PCI_CAN_SKIP_ISA_ALIGN) &&
47 !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
53 * We need to avoid collisions with `mirrored' VGA ports
54 * and other strange ISA hardware, so we always want the
55 * addresses to be allocated in the 0x000-0x0ff region
58 * Why? Because some silly external IO cards only decode
59 * the low 10 bits of the IO address. The 0x00-0xff region
60 * is reserved for motherboard devices that decode all 16
61 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
62 * but we want to try to avoid allocating at 0x2900-0x2bff
63 which might be mirrored at 0x0100-0x03ff..
/*
 * Arch hook used by the resource allocator to align a proposed PCI
 * resource. For I/O resources (IORESOURCE_IO), unless the device
 * qualifies for skip_isa_ioresource_align(), the start is rounded up
 * so its low 10 bits fall in 0x000-0x0ff, avoiding ISA-mirrored
 * ranges (see the comment block above).
 * NOTE(review): interior lines are missing from this view; how the
 * adjusted 'start' is written back is not visible here.
 */
66 pcibios_align_resource(void *data, struct resource *res,
67 resource_size_t size, resource_size_t align)
69 struct pci_dev *dev = data;
71 if (res->flags & IORESOURCE_IO) {
72 resource_size_t start = res->start;
74 if (skip_isa_ioresource_align(dev))
/* round up to the next 1K boundary: low 10 bits become 0x000-0x0ff */
77 start = (start + 0x3ff) & ~0x3ff;
82 EXPORT_SYMBOL(pcibios_align_resource);
/*
 * Check whether a device BAR coincides with a known platform-reserved
 * MMIO region (HPET, IO-APIC, or MMCONFIG aperture). Each match is
 * reported via dev_info(). Used by check_platform() to decide whether
 * the resource should be forcibly inserted into the resource tree.
 * NOTE(review): interior lines (declarations of base/size/i, returns)
 * are missing from this view.
 */
84 static int check_res_with_valid(struct pci_dev *dev, struct resource *res)
/* size is 0 for a completely empty resource (start == end == 0) */
91 size = (res->start == 0 && res->end == res->start) ? 0 :
92 (res->end - res->start + 1);
/* Does the BAR sit exactly on the HPET MMIO base? */
97 #ifdef CONFIG_HPET_TIMER
99 if (base == hpet_address && (res->flags & IORESOURCE_MEM)) {
100 dev_info(&dev->dev, "BAR has HPET at %08lx-%08lx\n",
101 base, base + size - 1);
/* Does the BAR sit on any IO-APIC's register window? */
106 #ifdef CONFIG_X86_IO_APIC
107 for (i = 0; i < nr_ioapics; i++) {
108 unsigned long ioapic_phys = mp_ioapics[i].mp_apicaddr;
110 if (base == ioapic_phys && (res->flags & IORESOURCE_MEM)) {
111 dev_info(&dev->dev, "BAR has ioapic at %08lx-%08lx\n",
112 base, base + size - 1);
/* Does the BAR sit on an MMCONFIG (PCI ECAM) aperture base? */
118 #ifdef CONFIG_PCI_MMCONFIG
119 for (i = 0; i < pci_mmcfg_config_num; i++) {
122 addr = pci_mmcfg_config[i].address;
123 if (base == addr && (res->flags & IORESOURCE_MEM)) {
124 dev_info(&dev->dev, "BAR has MMCONFIG at %08lx-%08lx\n",
125 base, base + size - 1);
/*
 * If the resource overlays a known platform region (per
 * check_res_with_valid()), forcibly insert it into the appropriate
 * root resource tree (iomem or ioport) with insert_resource(), which
 * tolerates overlaps that request_resource() would reject.
 * NOTE(review): the return statements are not visible in this view.
 */
134 static int check_platform(struct pci_dev *dev, struct resource *res)
136 struct resource *root = NULL;
139 * forcibly insert it into the
/* pick the root tree matching the resource type */
142 if (res->flags & IORESOURCE_MEM)
143 root = &iomem_resource;
144 else if (res->flags & IORESOURCE_IO)
145 root = &ioport_resource;
147 if (root && check_res_with_valid(dev, res)) {
148 insert_resource(root, res);
156 * Handle resources of PCI devices. If the world were perfect, we could
157 * just allocate all the resource regions and do nothing more. It isn't.
158 * On the other hand, we cannot just re-allocate all devices, as it would
159 * require us to know lots of host bridge internals. So we attempt to
160 * keep as much of the original configuration as possible, but tweak it
161 * when it's found to be wrong.
163 * Known BIOS problems we have to work around:
164 * - I/O or memory regions not configured
165 * - regions configured, but not enabled in the command register
166 * - bogus I/O addresses above 64K used
167 * - expansion ROMs left enabled (this may sound harmless, but given
168 * the fact the PCI specs explicitly allow address decoders to be
169 * shared between expansion ROMs and other resource regions, it's
170 * at least dangerous)
173 * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
174 * This gives us fixed barriers on where we can allocate.
175 * (2) Allocate resources for all enabled devices. If there is
176 * a collision, just mark the resource as unallocated. Also
177 * disable expansion ROMs during this step.
178 * (3) Try to allocate resources for disabled devices. If the
179 * resources were assigned correctly, everything goes well,
180 * if they weren't, they won't disturb allocation of other
182 * (4) Assign new addresses to resources which were either
183 * not configured at all or misconfigured. If explicitly
184 * requested by the user, configure expansion ROM address
/*
 * Step (1) of the survey: claim the bridge window resources of every
 * PCI-to-PCI bridge, walking the bus tree depth-first. A window that
 * cannot be claimed (no start address, no parent resource, or a
 * request_resource() collision) is reported, unless check_platform()
 * decides to force-insert it instead.
 * NOTE(review): interior lines are missing from this view (e.g. the
 * declarations of 'bus'/'dev'/'idx' and the resource-invalidation
 * code hinted at by the truncated comment around original line 210).
 */
188 static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
193 struct resource *r, *pr;
195 /* Depth-First Search on bus tree */
196 list_for_each_entry(bus, bus_list, node) {
197 if ((dev = bus->self)) {
/* bridge windows live in the PCI_BRIDGE_RESOURCES slots */
198 for (idx = PCI_BRIDGE_RESOURCES;
199 idx < PCI_NUM_RESOURCES; idx++) {
200 r = &dev->resource[idx];
203 pr = pci_find_parent_resource(dev, r);
204 if (!r->start || !pr ||
205 request_resource(pr, r) < 0) {
206 if (check_platform(dev, r))
208 dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx);
210 * Something is wrong with the region.
211 * Invalidate the resource to prevent
212 * child resource allocations in this
/* recurse into the buses behind this bridge */
219 pcibios_allocate_bus_resources(&bus->children);
/*
 * Steps (2) and (3) of the survey: claim device BARs, in two passes.
 * pass 0 claims resources of enabled devices (their decode bit set in
 * PCI_COMMAND), pass 1 claims those of disabled devices — hence the
 * "pass == disabled" comparison. On the second pass, any expansion
 * ROM left enabled by the BIOS is switched off (the resource region
 * is kept, but unregistered) because ROM decoders may be shared with
 * other BARs and are therefore dangerous to leave on.
 * NOTE(review): interior lines are missing from this view (loop
 * variable declarations, 'continue' statements after the
 * parent/start checks, and some closing braces).
 */
223 static void __init pcibios_allocate_resources(int pass)
225 struct pci_dev *dev = NULL;
228 struct resource *r, *pr;
230 for_each_pci_dev(dev) {
231 pci_read_config_word(dev, PCI_COMMAND, &command);
232 for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
233 r = &dev->resource[idx];
234 if (r->parent) /* Already allocated */
236 if (!r->start) /* Address not assigned at all */
/* is this BAR's decode currently disabled in the command register? */
238 if (r->flags & IORESOURCE_IO)
239 disabled = !(command & PCI_COMMAND_IO);
241 disabled = !(command & PCI_COMMAND_MEMORY);
242 if (pass == disabled) {
243 dev_dbg(&dev->dev, "resource %#08llx-%#08llx (f=%lx, d=%d, p=%d)\n",
244 (unsigned long long) r->start,
245 (unsigned long long) r->end,
246 r->flags, disabled, pass);
247 pr = pci_find_parent_resource(dev, r);
248 if (!pr || request_resource(pr, r) < 0) {
249 if (check_platform(dev, r))
251 dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx);
252 /* We'll assign a new address later */
259 r = &dev->resource[PCI_ROM_RESOURCE];
260 if (r->flags & IORESOURCE_ROM_ENABLE) {
261 /* Turn the ROM off, leave the resource region,
262 * but keep it unregistered. */
264 dev_dbg(&dev->dev, "disabling ROM\n");
265 r->flags &= ~IORESOURCE_ROM_ENABLE;
/* clear the hardware enable bit in the ROM BAR itself */
266 pci_read_config_dword(dev,
267 dev->rom_base_reg, &reg);
268 pci_write_config_dword(dev, dev->rom_base_reg,
269 reg & ~PCI_ROM_ADDRESS_ENABLE);
/*
 * Step (4) of the survey, run as an fs_initcall: first try to honor
 * BIOS-assigned ROM addresses (unless the user forced PCI_ASSIGN_ROMS),
 * then let the generic pci_assign_unassigned_resources() assign
 * everything that is still unconfigured or was invalidated earlier.
 * NOTE(review): interior lines are missing from this view ('continue'
 * after the flags/start check, the collision-handling body, and the
 * return statement).
 */
275 static int __init pcibios_assign_resources(void)
277 struct pci_dev *dev = NULL;
278 struct resource *r, *pr;
280 if (!(pci_probe & PCI_ASSIGN_ROMS)) {
282 * Try to use BIOS settings for ROMs, otherwise let
283 * pci_assign_unassigned_resources() allocate the new
286 for_each_pci_dev(dev) {
287 r = &dev->resource[PCI_ROM_RESOURCE];
288 if (!r->flags || !r->start)
290 pr = pci_find_parent_resource(dev, r);
291 if (!pr || request_resource(pr, r) < 0) {
298 pci_assign_unassigned_resources();
/*
 * Entry point for the PCI resource survey described in the large
 * comment above: claim bridge windows first, then enabled-device
 * BARs (pass 0), then disabled-device BARs (pass 1). The final
 * assignment step runs later from the fs_initcall below.
 */
303 void __init pcibios_resource_survey(void)
305 DBG("PCI: Allocating resources\n");
306 pcibios_allocate_bus_resources(&pci_root_buses);
307 pcibios_allocate_resources(0);
308 pcibios_allocate_resources(1);
/*
 * Run pcibios_assign_resources() at fs_initcall time (one level below
 * subsys_initcall) so motherboard drivers get a chance to reserve
 * their resources first.
 */
312 * called in fs_initcall (one below subsys_initcall),
313 * give a chance for motherboard reserve resources
315 fs_initcall(pcibios_assign_resources);
318 * If we set up a device for bus mastering, we need to check the latency
319 * timer as certain crappy BIOSes forget to set it properly.
/* upper clamp for the latency timer; 255 is the register maximum */
321 unsigned int pcibios_max_latency = 255;
/*
 * Fix up the PCI latency timer when enabling bus mastering: raise a
 * too-small value to 64 (capped at pcibios_max_latency) and clamp a
 * too-large value down to pcibios_max_latency, then write it back.
 * NOTE(review): the declaration of 'lat' and the condition guarding
 * the "raise to 64" branch are not visible in this view.
 */
323 void pcibios_set_master(struct pci_dev *dev)
326 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
328 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
329 else if (lat > pcibios_max_latency)
330 lat = pcibios_max_latency;
333 dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
334 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
/*
 * vm_ops .close handler: release the PAT memtype reservation taken in
 * pci_track_mmap_page_range() when the VMA is torn down. The physical
 * base is recovered from vm_pgoff.
 */
337 static void pci_unmap_page_range(struct vm_area_struct *vma)
339 u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT;
340 free_memtype(addr, addr + vma->vm_end - vma->vm_start);
/*
 * vm_ops .open handler: take an additional PAT memtype reservation for
 * the mapped physical range (e.g. on fork/VMA split), using the cache
 * attribute recorded in vm_page_prot.
 * NOTE(review): the mask applied to pgprot_val() is on a line not
 * visible in this view (numbering jumps 346 -> 349).
 */
343 static void pci_track_mmap_page_range(struct vm_area_struct *vma)
345 u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT;
346 unsigned long flags = pgprot_val(vma->vm_page_prot)
349 reserve_memtype(addr, addr + vma->vm_end - vma->vm_start, flags, NULL);
/*
 * VMA operations for PCI BAR mappings: track/release PAT memtype
 * reservations on open/close, and allow ptrace-style access to the
 * mapped physical pages via generic_access_phys.
 */
352 static struct vm_operations_struct pci_mmap_ops = {
353 .open = pci_track_mmap_page_range,
354 .close = pci_unmap_page_range,
355 .access = generic_access_phys,
/*
 * mmap() a PCI BAR's physical range into userspace.
 *
 * - I/O-port mappings are rejected: port space cannot be reached by
 *   normal loads/stores on x86.
 * - With PAT enabled, the caller's write_combine request selects WC;
 *   otherwise (PAT on, or any CPU newer than a 486) UC- is requested
 *   to match ioremap()'s default and avoid attribute conflicts.
 * - reserve_memtype() arbitrates the attribute; if it hands back a
 *   different one, certain downgrades (UC-/WC request answered with
 *   WB) are refused and the reservation is released.
 * - For pfns the kernel has a direct (lowmem/high-64-bit) mapping of,
 *   the kernel linear mapping's attribute is changed too, keeping the
 *   two mappings consistent.
 * - Finally the range is mapped with io_remap_pfn_range() and the VMA
 *   gets pci_mmap_ops so the memtype reservation follows the VMA's
 *   lifetime.
 * NOTE(review): several interior lines are missing from this view
 * (error-return statements, the retval check after reserve_memtype,
 * and the final return).
 */
358 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
359 enum pci_mmap_state mmap_state, int write_combine)
362 u64 addr = vma->vm_pgoff << PAGE_SHIFT;
363 unsigned long len = vma->vm_end - vma->vm_start;
365 unsigned long new_flags;
368 /* I/O space cannot be accessed via normal processor loads and
369 * stores on this platform.
371 if (mmap_state == pci_mmap_io)
/* choose the requested cache attribute */
374 prot = pgprot_val(vma->vm_page_prot);
375 if (pat_enabled && write_combine)
376 prot |= _PAGE_CACHE_WC;
377 else if (pat_enabled || boot_cpu_data.x86 > 3)
379 * ioremap() and ioremap_nocache() defaults to UC MINUS for now.
380 * To avoid attribute conflicts, request UC MINUS here
383 prot |= _PAGE_CACHE_UC_MINUS;
385 vma->vm_page_prot = __pgprot(prot);
/* let the PAT memtype tracker arbitrate the attribute */
387 flags = pgprot_val(vma->vm_page_prot) & _PAGE_CACHE_MASK;
388 retval = reserve_memtype(addr, addr + len, flags, &new_flags);
392 if (flags != new_flags) {
394 * Do not fallback to certain memory types with certain
396 * - request is uncached, return cannot be write-back
397 * - request is uncached, return cannot be write-combine
398 * - request is write-combine, return cannot be write-back
400 if ((flags == _PAGE_CACHE_UC_MINUS &&
401 (new_flags == _PAGE_CACHE_WB)) ||
402 (flags == _PAGE_CACHE_WC &&
403 new_flags == _PAGE_CACHE_WB)) {
404 free_memtype(addr, addr+len);
/* keep the kernel's own linear mapping of these pfns consistent */
410 if (((vma->vm_pgoff < max_low_pfn_mapped) ||
411 (vma->vm_pgoff >= (1UL<<(32 - PAGE_SHIFT)) &&
412 vma->vm_pgoff < max_pfn_mapped)) &&
413 ioremap_change_attr((unsigned long)__va(addr), len, flags)) {
414 free_memtype(addr, addr + len);
418 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
419 vma->vm_end - vma->vm_start,
423 vma->vm_ops = &pci_mmap_ops;