agp/intel-gtt: kill intel_i830_tlbflush
[safe/jmp/linux-2.6] / drivers / char / agp / intel-gtt.c
/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's stuck
 * here.
 *
 * /fairy-tale-mode off
 */
17
18 /*
19  * If we have Intel graphics, we're not going to have anything other than
20  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
21  * on the Intel IOMMU support (CONFIG_DMAR).
22  * Only newer chipsets need to bother with this, of course.
23  */
24 #ifdef CONFIG_DMAR
25 #define USE_PCI_DMA_API 1
26 #endif
27
/* Fixed aperture sizes for the i810: {aperture size (MB), number of GTT
 * entries, gatt page order} — field meanings inferred from the uses of
 * num_entries/page_order below. */
static const struct aper_size_info_fixed intel_i810_sizes[] =
{
	{64, 16384, 4},
	/* The 32M mode still requires a 64k gatt */
	{32, 8192, 4}
};
34
35 #define AGP_DCACHE_MEMORY       1
36 #define AGP_PHYS_MEMORY         2
37 #define INTEL_AGP_CACHED_MEMORY 3
38
/* PTE bit masks applied to addresses before they are written into the
 * GTT, indexed by the AGP memory type above. */
static struct gatt_mask intel_i810_masks[] =
{
	{.mask = I810_PTE_VALID, .type = 0},
	/* Dedicated display cache ("dcache") entries on i810. */
	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
	{.mask = I810_PTE_VALID, .type = 0},
	/* Cacheable system memory mapping (chipset snoops these pages). */
	{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
	 .type = INTEL_AGP_CACHED_MEMORY}
};
47
/* Driver-global state shared by all the chipset variants below. */
static struct _intel_private {
	struct pci_dev *pcidev;	/* device one (the graphics function, not the bridge) */
	u8 __iomem *registers;	/* mapped MMIO register window */
	u32 __iomem *gtt;		/* I915G: GTT mapped separately from registers */
	int num_dcache_entries;	/* i810 dedicated video ram entries (0 if none) */
	/* gtt_entries is the number of gtt entries that are already mapped
	 * to stolen memory.  Stolen memory is larger than the memory mapped
	 * through gtt_entries, as it includes some reserved space for the BIOS
	 * popup and for the GTT.
	 */
	int gtt_entries;			/* i830+ */
	int gtt_total_size;
	/* Chipset-flush page: kmap'ed CPU pointer on i8xx, ioremap'ed on i9xx;
	 * the union reflects that only one variant is ever in use. */
	union {
		void __iomem *i9xx_flush_page;
		void *i8xx_flush_page;
	};
	struct page *i8xx_page;		/* backing page for i8xx_flush_page */
	struct resource ifp_resource;	/* i9xx isoch flush page resource */
	int resource_valid;		/* nonzero once ifp_resource is claimed */
} intel_private;
68
69 #ifdef USE_PCI_DMA_API
70 static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
71 {
72         *ret = pci_map_page(intel_private.pcidev, page, 0,
73                             PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
74         if (pci_dma_mapping_error(intel_private.pcidev, *ret))
75                 return -EINVAL;
76         return 0;
77 }
78
/* Undo intel_agp_map_page(); @page itself is unused, only the dma
 * address is needed for the unmap. */
static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
{
	pci_unmap_page(intel_private.pcidev, dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
84
85 static void intel_agp_free_sglist(struct agp_memory *mem)
86 {
87         struct sg_table st;
88
89         st.sgl = mem->sg_list;
90         st.orig_nents = st.nents = mem->page_count;
91
92         sg_free_table(&st);
93
94         mem->sg_list = NULL;
95         mem->num_sg = 0;
96 }
97
/*
 * Build a scatterlist over @mem's pages and DMA-map it.
 *
 * On success mem->sg_list/mem->num_sg are filled in and 0 is returned;
 * returns -ENOMEM on table allocation or mapping failure (the sglist is
 * freed again on the mapping-failure path).
 */
static int intel_agp_map_memory(struct agp_memory *mem)
{
	struct sg_table st;
	struct scatterlist *sg;
	int i;

	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);

	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
		return -ENOMEM;

	/* Keep only the sgl pointer; st itself is reconstructed at free time. */
	mem->sg_list = sg = st.sgl;

	for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);

	/* num_sg may be < page_count if the IOMMU merged adjacent pages. */
	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
	if (unlikely(!mem->num_sg)) {
		intel_agp_free_sglist(mem);
		return -ENOMEM;
	}
	return 0;
}
122
/* DMA-unmap and free the scatterlist built by intel_agp_map_memory(). */
static void intel_agp_unmap_memory(struct agp_memory *mem)
{
	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);

	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
	intel_agp_free_sglist(mem);
}
131
/*
 * Write PTEs for @mem's DMA-mapped pages into the GTT starting at
 * @pg_start, using the mask selected by @mask_type.
 *
 * Two cases: if the IOMMU did not merge any entries we can walk the
 * sglist 1:1; otherwise each sg element may cover several pages and we
 * must emit one PTE per PAGE_SIZE chunk.
 */
static void intel_agp_insert_sg_entries(struct agp_memory *mem,
					off_t pg_start, int mask_type)
{
	struct scatterlist *sg;
	int i, j;

	j = pg_start;

	WARN_ON(!mem->num_sg);

	if (mem->num_sg == mem->page_count) {
		for_each_sg(mem->sg_list, sg, mem->page_count, i) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					sg_dma_address(sg), mask_type),
					intel_private.gtt+j);
			j++;
		}
	} else {
		/* sg may merge pages, but we have to separate
		 * per-page addr for GTT */
		unsigned int len, m;

		for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
			len = sg_dma_len(sg) / PAGE_SIZE;
			for (m = 0; m < len; m++) {
				writel(agp_bridge->driver->mask_memory(agp_bridge,
								       sg_dma_address(sg) + m * PAGE_SIZE,
								       mask_type),
				       intel_private.gtt+j);
				j++;
			}
		}
	}
	/* Read back the last PTE to flush the posted writes. */
	readl(intel_private.gtt+j-1);
}
167
168 #else
169
170 static void intel_agp_insert_sg_entries(struct agp_memory *mem,
171                                         off_t pg_start, int mask_type)
172 {
173         int i, j;
174         u32 cache_bits = 0;
175
176         if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
177             agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
178         {
179                 cache_bits = I830_PTE_SYSTEM_CACHED;
180         }
181
182         for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
183                 writel(agp_bridge->driver->mask_memory(agp_bridge,
184                                 page_to_phys(mem->pages[i]), mask_type),
185                        intel_private.gtt+j);
186         }
187
188         readl(intel_private.gtt+j-1);
189 }
190
191 #endif
192
193 static int intel_i810_fetch_size(void)
194 {
195         u32 smram_miscc;
196         struct aper_size_info_fixed *values;
197
198         pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
199         values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
200
201         if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
202                 dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
203                 return 0;
204         }
205         if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
206                 agp_bridge->previous_size =
207                         agp_bridge->current_size = (void *) (values + 1);
208                 agp_bridge->aperture_size_idx = 1;
209                 return values[1].size;
210         } else {
211                 agp_bridge->previous_size =
212                         agp_bridge->current_size = (void *) (values);
213                 agp_bridge->aperture_size_idx = 0;
214                 return values[0].size;
215         }
216
217         return 0;
218 }
219
/*
 * Program the i810: map the MMIO window, detect dedicated video ram,
 * record the aperture bus address, enable the page table, and fill the
 * GTT with the scratch page when the driver asks for one.
 *
 * Returns 0 on success, -ENOMEM if the register window can't be mapped.
 */
static int intel_i810_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	if (!intel_private.registers) {
		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
		temp &= 0xfff80000;	/* MMIO base is 512K aligned */

		intel_private.registers = ioremap(temp, 128 * 4096);
		if (!intel_private.registers) {
			dev_err(&intel_private.pcidev->dev,
				"can't remap memory\n");
			return -ENOMEM;
		}
	}

	if ((readl(intel_private.registers+I810_DRAM_CTL)
		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		/* This will need to be dynamically assigned */
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
		intel_private.num_dcache_entries = 1024;
	}
	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	/* Point the chipset at the gatt and enable page table walks. */
	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		for (i = 0; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
	}
	global_cache_flush();
	return 0;
}
261
/* Disable the page table and release the MMIO mapping. */
static void intel_i810_cleanup(void)
{
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers);	/* PCI Posting. */
	iounmap(intel_private.registers);
}
268
269 static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
270 {
271         return;
272 }
273
274 /* Exists to support ARGB cursors */
275 static struct page *i8xx_alloc_pages(void)
276 {
277         struct page *page;
278
279         page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
280         if (page == NULL)
281                 return NULL;
282
283         if (set_pages_uc(page, 4) < 0) {
284                 set_pages_wb(page, 4);
285                 __free_pages(page, 2);
286                 return NULL;
287         }
288         get_page(page);
289         atomic_inc(&agp_bridge->current_memory_agp);
290         return page;
291 }
292
/* Undo i8xx_alloc_pages(): restore write-back caching, drop the pinning
 * reference, and free the order-2 allocation. NULL is a no-op. */
static void i8xx_destroy_pages(struct page *page)
{
	if (page == NULL)
		return;

	set_pages_wb(page, 4);
	put_page(page);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}
303
304 static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
305                                         int type)
306 {
307         if (type < AGP_USER_TYPES)
308                 return type;
309         else if (type == AGP_USER_CACHED_MEMORY)
310                 return INTEL_AGP_CACHED_MEMORY;
311         else
312                 return 0;
313 }
314
/*
 * Bind @mem into the i810 GTT at @pg_start.
 *
 * Returns 0 on success, -EBUSY if any target PTE is already in use,
 * -EINVAL on range/type errors.  Note the label layout: an empty
 * page_count jumps straight to "out" and reports success, while error
 * paths enter at "out_err" keeping the prepared ret value; both paths
 * mark the memory flushed on exit.
 */
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
				int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;


	/* Refuse to overwrite PTEs that are already bound. */
	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
			ret = -EBUSY;
			goto out_err;
		}
	}

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	switch (mask_type) {
	case AGP_DCACHE_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		/* Dcache entries map the dedicated video ram linearly, so
		 * the PTE value is derived from the slot index itself. */
		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
			       intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
		break;
	case AGP_PHYS_MEMORY:
	case AGP_NORMAL_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					page_to_phys(mem->pages[i]), mask_type),
			       intel_private.registers+I810_PTE_BASE+(j*4));
		}
		/* Read back the last PTE to flush the posted writes. */
		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
		break;
	default:
		goto out_err;
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
376
/*
 * Unbind @mem from the GTT by pointing its PTE range back at the
 * scratch page.  Always returns 0.
 */
static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
				int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
	}
	/* Read back the last PTE to flush the posted writes. */
	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));

	return 0;
}
392
393 /*
394  * The i810/i830 requires a physical address to program its mouse
395  * pointer into hardware.
396  * However the Xserver still writes to it through the agp aperture.
397  */
398 static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
399 {
400         struct agp_memory *new;
401         struct page *page;
402
403         switch (pg_count) {
404         case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
405                 break;
406         case 4:
407                 /* kludge to get 4 physical pages for ARGB cursor */
408                 page = i8xx_alloc_pages();
409                 break;
410         default:
411                 return NULL;
412         }
413
414         if (page == NULL)
415                 return NULL;
416
417         new = agp_create_memory(pg_count);
418         if (new == NULL)
419                 return NULL;
420
421         new->pages[0] = page;
422         if (pg_count == 4) {
423                 /* kludge to get 4 physical pages for ARGB cursor */
424                 new->pages[1] = new->pages[0] + 1;
425                 new->pages[2] = new->pages[1] + 1;
426                 new->pages[3] = new->pages[2] + 1;
427         }
428         new->page_count = pg_count;
429         new->num_scratch_pages = pg_count;
430         new->type = AGP_PHYS_MEMORY;
431         new->physical = page_to_phys(new->pages[0]);
432         return new;
433 }
434
435 static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
436 {
437         struct agp_memory *new;
438
439         if (type == AGP_DCACHE_MEMORY) {
440                 if (pg_count != intel_private.num_dcache_entries)
441                         return NULL;
442
443                 new = agp_create_memory(1);
444                 if (new == NULL)
445                         return NULL;
446
447                 new->type = AGP_DCACHE_MEMORY;
448                 new->page_count = pg_count;
449                 new->num_scratch_pages = 0;
450                 agp_free_page_array(new);
451                 return new;
452         }
453         if (type == AGP_PHYS_MEMORY)
454                 return alloc_agpphysmem_i8xx(pg_count, type);
455         return NULL;
456 }
457
/* Free memory allocated by intel_i810_alloc_by_type(); the 4-page case
 * mirrors i8xx_alloc_pages(), single pages go back via the driver hook. */
static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}
474
/* OR the PTE control bits for @type into @addr to form a GTT entry. */
static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}
481
/* Fixed aperture sizes for i830-class chipsets (same field layout as
 * intel_i810_sizes above). */
static struct aper_size_info_fixed intel_i830_sizes[] =
{
	{128, 32768, 5},
	/* The 64M mode still requires a 128k gatt */
	{64, 16384, 5},
	{256, 65536, 6},
	{512, 131072, 7},
};
490
/*
 * Work out how many GTT entries are already consumed by BIOS-stolen
 * memory and record the count in intel_private.gtt_entries.
 *
 * Two stages: first determine "size", the reserved space (in KB) at the
 * top of stolen memory (GTT itself plus BIOS popup), which varies per
 * generation; then decode the stolen-memory size from the chipset's
 * GMCH control register and subtract the reserved space.
 */
static void intel_i830_init_gtt_entries(void)
{
	u16 gmch_ctrl;
	int gtt_entries = 0;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	int size; /* reserved space (in kb) at the top of stolen memory */

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);

	if (IS_I965) {
		u32 pgetbl_ctl;
		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

		/* The 965 has a field telling us the size of the GTT,
		 * which may be larger than what is necessary to map the
		 * aperture.
		 */
		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
		case I965_PGETBL_SIZE_128KB:
			size = 128;
			break;
		case I965_PGETBL_SIZE_256KB:
			size = 256;
			break;
		case I965_PGETBL_SIZE_512KB:
			size = 512;
			break;
		case I965_PGETBL_SIZE_1MB:
			size = 1024;
			break;
		case I965_PGETBL_SIZE_2MB:
			size = 2048;
			break;
		case I965_PGETBL_SIZE_1_5MB:
			size = 1024 + 512;
			break;
		default:
			dev_info(&intel_private.pcidev->dev,
				 "unknown page table size, assuming 512KB\n");
			size = 512;
		}
		size += 4; /* add in BIOS popup space */
	} else if (IS_G33 && !IS_PINEVIEW) {
	/* G33's GTT size defined in gmch_ctrl */
		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
		case G33_PGETBL_SIZE_1M:
			size = 1024;
			break;
		case G33_PGETBL_SIZE_2M:
			size = 2048;
			break;
		default:
			dev_info(&agp_bridge->dev->dev,
				 "unknown page table size 0x%x, assuming 512KB\n",
				(gmch_ctrl & G33_PGETBL_SIZE_MASK));
			size = 512;
		}
		size += 4;
	} else if (IS_G4X || IS_PINEVIEW) {
		/* On 4 series hardware, GTT stolen is separate from graphics
		 * stolen, ignore it in stolen gtt entries counting.  However,
		 * 4KB of the stolen memory doesn't get mapped to the GTT.
		 */
		size = 4;
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		size = agp_bridge->driver->fetch_size() + 4;
	}

	/* Now decode the stolen-memory field per chipset family. */
	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			gtt_entries = KB(512) - KB(size);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			gtt_entries = MB(1) - KB(size);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			gtt_entries = MB(8) - KB(size);
			break;
		case I830_GMCH_GMS_LOCAL:
			/* i830 can use dedicated ("local") RDRAM; size is
			 * derived from the RDRAM channel type register. */
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			gtt_entries = 0;
			break;
		}
	} else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
		   agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
		/*
		 * SandyBridge has new memory control reg at 0x50.w
		 */
		u16 snb_gmch_ctl;
		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
		case SNB_GMCH_GMS_STOLEN_32M:
			gtt_entries = MB(32) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_64M:
			gtt_entries = MB(64) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_96M:
			gtt_entries = MB(96) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_128M:
			gtt_entries = MB(128) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_160M:
			gtt_entries = MB(160) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_192M:
			gtt_entries = MB(192) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_224M:
			gtt_entries = MB(224) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_256M:
			gtt_entries = MB(256) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_288M:
			gtt_entries = MB(288) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_320M:
			gtt_entries = MB(320) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_352M:
			gtt_entries = MB(352) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_384M:
			gtt_entries = MB(384) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_416M:
			gtt_entries = MB(416) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_448M:
			gtt_entries = MB(448) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_480M:
			gtt_entries = MB(480) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_512M:
			gtt_entries = MB(512) - KB(size);
			break;
		}
	} else {
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			gtt_entries = MB(1) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			gtt_entries = MB(4) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			gtt_entries = MB(8) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			gtt_entries = MB(16) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			gtt_entries = MB(32) - KB(size);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			/* Check it's really I915G */
			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
				gtt_entries = MB(48) - KB(size);
			else
				gtt_entries = 0;
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			/* Check it's really I915G */
			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
				gtt_entries = MB(64) - KB(size);
			else
				gtt_entries = 0;
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			if (IS_G33 || IS_I965 || IS_G4X)
				gtt_entries = MB(128) - KB(size);
			else
				gtt_entries = 0;
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			if (IS_G33 || IS_I965 || IS_G4X)
				gtt_entries = MB(256) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(96) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(160) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(224) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(352) - KB(size);
			else
				gtt_entries = 0;
			break;
		default:
			gtt_entries = 0;
			break;
		}
	}
	if (gtt_entries > 0) {
		dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
		       gtt_entries / KB(1), local ? "local" : "stolen");
		/* Convert bytes of stolen memory into 4K GTT entries. */
		gtt_entries /= KB(4);
	} else {
		dev_info(&agp_bridge->dev->dev,
		       "no pre-allocated video memory detected\n");
		gtt_entries = 0;
	}

	intel_private.gtt_entries = gtt_entries;
}
727
/* Tear down the i8xx chipset-flush page set up by intel_i830_setup_flush(). */
static void intel_i830_fini_flush(void)
{
	kunmap(intel_private.i8xx_page);
	intel_private.i8xx_flush_page = NULL;
	unmap_page_from_agp(intel_private.i8xx_page);

	__free_page(intel_private.i8xx_page);
	intel_private.i8xx_page = NULL;
}
737
/* Allocate and kmap the page used by intel_i830_chipset_flush(); silently
 * does nothing on failure (flushing then degrades, it doesn't error). */
static void intel_i830_setup_flush(void)
{
	/* return if we've already set the flush mechanism up */
	if (intel_private.i8xx_page)
		return;

	intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
	if (!intel_private.i8xx_page)
		return;

	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
	if (!intel_private.i8xx_flush_page)
		intel_i830_fini_flush();
}
752
753 /* The chipset_flush interface needs to get data that has already been
754  * flushed out of the CPU all the way out to main memory, because the GPU
755  * doesn't snoop those buffers.
756  *
757  * The 8xx series doesn't have the same lovely interface for flushing the
758  * chipset write buffers that the later chips do. According to the 865
759  * specs, it's 64 octwords, or 1KB.  So, to get those previous things in
760  * that buffer out, we just fill 1KB and clflush it out, on the assumption
761  * that it'll push whatever was in there out.  It appears to work.
762  */
/* Push pending chipset write-buffer contents to memory by filling and
 * clflushing a 1KB scratch area (see the comment block above); falls
 * back to a full wbinvd on CPUs without clflush. */
static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
{
	unsigned int *pg = intel_private.i8xx_flush_page;

	memset(pg, 0, 1024);

	if (cpu_has_clflush)
		clflush_cache_range(pg, 1024);
	else if (wbinvd_on_all_cpus() != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");
}
774
775 /* The intel i830 automatically initializes the agp aperture during POST.
776  * Use the memory already set aside for in the GTT.
777  */
/*
 * "Create" the gatt table on i830: the BIOS has already set it up in
 * stolen memory during POST, so we only map the MMIO window, read the
 * existing table's bus address out of PGETBL_CTL, and count the stolen
 * entries.  No table memory is allocated (gatt_table_real stays NULL).
 *
 * Returns 0 on success, -ENOMEM if the register window can't be mapped.
 */
static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp;

	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	agp_bridge->gatt_table_real = NULL;

	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
	temp &= 0xfff80000;	/* MMIO base is 512K aligned */

	intel_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_private.registers)
		return -ENOMEM;

	/* low bits of PGETBL_CTL are control flags; mask to get the address */
	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ?? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();

	agp_bridge->gatt_table = NULL;

	agp_bridge->gatt_bus_addr = temp;

	return 0;
}
809
/* Return the gatt table to a sane state. Use the top of stolen
 * memory for the GTT.
 */
static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
{
	/* Nothing to free: the gatt lives in BIOS-stolen memory and was
	 * never allocated by this driver (see intel_i830_create_gatt_table,
	 * which leaves gatt_table_real NULL). */
	return 0;
}
817
818 static int intel_i830_fetch_size(void)
819 {
820         u16 gmch_ctrl;
821         struct aper_size_info_fixed *values;
822
823         values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
824
825         if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
826             agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
827                 /* 855GM/852GM/865G has 128MB aperture size */
828                 agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
829                 agp_bridge->aperture_size_idx = 0;
830                 return values[0].size;
831         }
832
833         pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
834
835         if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
836                 agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
837                 agp_bridge->aperture_size_idx = 0;
838                 return values[0].size;
839         } else {
840                 agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1);
841                 agp_bridge->aperture_size_idx = 1;
842                 return values[1].size;
843         }
844
845         return 0;
846 }
847
/* Enable the i830 GTT: record the aperture base, switch the GMCH on,
 * point the chipset at the gatt and scrub all non-stolen entries.
 */
static int intel_i830_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	/* Aperture (graphics memory) base comes from the GMADDR BAR. */
	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */

	/* Point every PTE above the BIOS-stolen range at the scratch page
	 * so stray GPU accesses hit harmless memory. */
	if (agp_bridge->driver->needs_scratch_page) {
		for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
	}

	global_cache_flush();

	intel_i830_setup_flush();
	return 0;
}
879
/* Undo intel_i830_create_gatt_table: unmap the MMIO register window. */
static void intel_i830_cleanup(void)
{
	iounmap(intel_private.registers);
}
884
/* Bind mem's pages into the GTT starting at PTE index pg_start.
 *
 * PTEs below intel_private.gtt_entries cover BIOS-stolen memory and are
 * refused.  Note the shared exit labels: the success path falls through
 * from out: into out_err:, so every exit marks mem as flushed.
 */
static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	/* Empty request: trivially successful. */
	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	/* Refuse to remap PTEs that cover stolen memory. */
	if (pg_start < intel_private.gtt_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
			   pg_start, intel_private.gtt_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	/* Range must fit inside the aperture. */
	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i830 can't check the GTT for entries since its read only,
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	/* Only untyped, physical and cached memory types are accepted. */
	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	/* Write one PTE per page; read the last one back to flush the
	 * posted PCI writes. */
	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
		       intel_private.registers+I810_PTE_BASE+(j*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
941
942 static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
943                                      int type)
944 {
945         int i;
946
947         if (mem->page_count == 0)
948                 return 0;
949
950         if (pg_start < intel_private.gtt_entries) {
951                 dev_info(&intel_private.pcidev->dev,
952                          "trying to disable local/stolen memory\n");
953                 return -EINVAL;
954         }
955
956         for (i = pg_start; i < (mem->page_count + pg_start); i++) {
957                 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
958         }
959         readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
960
961         return 0;
962 }
963
964 static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
965 {
966         if (type == AGP_PHYS_MEMORY)
967                 return alloc_agpphysmem_i8xx(pg_count, type);
968         /* always return NULL for other allocation types for now */
969         return NULL;
970 }
971
972 static int intel_alloc_chipset_flush_resource(void)
973 {
974         int ret;
975         ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
976                                      PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
977                                      pcibios_align_resource, agp_bridge->dev);
978
979         return ret;
980 }
981
982 static void intel_i915_setup_chipset_flush(void)
983 {
984         int ret;
985         u32 temp;
986
987         pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
988         if (!(temp & 0x1)) {
989                 intel_alloc_chipset_flush_resource();
990                 intel_private.resource_valid = 1;
991                 pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
992         } else {
993                 temp &= ~1;
994
995                 intel_private.resource_valid = 1;
996                 intel_private.ifp_resource.start = temp;
997                 intel_private.ifp_resource.end = temp + PAGE_SIZE;
998                 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
999                 /* some BIOSes reserve this area in a pnp some don't */
1000                 if (ret)
1001                         intel_private.resource_valid = 0;
1002         }
1003 }
1004
1005 static void intel_i965_g33_setup_chipset_flush(void)
1006 {
1007         u32 temp_hi, temp_lo;
1008         int ret;
1009
1010         pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
1011         pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
1012
1013         if (!(temp_lo & 0x1)) {
1014
1015                 intel_alloc_chipset_flush_resource();
1016
1017                 intel_private.resource_valid = 1;
1018                 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
1019                         upper_32_bits(intel_private.ifp_resource.start));
1020                 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1021         } else {
1022                 u64 l64;
1023
1024                 temp_lo &= ~0x1;
1025                 l64 = ((u64)temp_hi << 32) | temp_lo;
1026
1027                 intel_private.resource_valid = 1;
1028                 intel_private.ifp_resource.start = l64;
1029                 intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1030                 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1031                 /* some BIOSes reserve this area in a pnp some don't */
1032                 if (ret)
1033                         intel_private.resource_valid = 0;
1034         }
1035 }
1036
/* Set up and map the chipset flush page for i9xx-class parts.
 * Idempotent: returns immediately if already configured.  Sandybridge
 * skips this mechanism entirely.
 */
static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (IS_SNB)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_I965 || IS_G33 || IS_G4X) {
		/* 64-bit IFPADDR variant. */
		intel_i965_g33_setup_chipset_flush();
	} else {
		/* 32-bit IFPADDR variant. */
		intel_i915_setup_chipset_flush();
	}

	/* Map the page uncached so writes reach the chipset immediately. */
	if (intel_private.ifp_resource.start) {
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
		if (!intel_private.i9xx_flush_page)
			dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
	}
}
1063
1064 static int intel_i915_configure(void)
1065 {
1066         struct aper_size_info_fixed *current_size;
1067         u32 temp;
1068         u16 gmch_ctrl;
1069         int i;
1070
1071         current_size = A_SIZE_FIX(agp_bridge->current_size);
1072
1073         pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
1074
1075         agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1076
1077         pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
1078         gmch_ctrl |= I830_GMCH_ENABLED;
1079         pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
1080
1081         writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
1082         readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
1083
1084         if (agp_bridge->driver->needs_scratch_page) {
1085                 for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
1086                         writel(agp_bridge->scratch_page, intel_private.gtt+i);
1087                 }
1088                 readl(intel_private.gtt+i-1);   /* PCI Posting. */
1089         }
1090
1091         global_cache_flush();
1092
1093         intel_i9xx_setup_flush();
1094
1095         return 0;
1096 }
1097
/* Undo i9xx setup: release the flush page mapping and its bus resource,
 * then unmap the GTT and MMIO register windows.
 */
static void intel_i915_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	/* Reset so intel_i9xx_setup_flush() would reconfigure from scratch. */
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);
}
1109
/* Flush the chipset write buffers by poking the flush page mapped in
 * intel_i9xx_setup_flush(); a silent no-op if no page was mapped.
 */
static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
{
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}
1115
/* Bind mem's pages into the GTT starting at PTE index pg_start, using
 * the shared sg-entry helper for the PTE writes.
 *
 * PTEs below intel_private.gtt_entries cover BIOS-stolen memory and are
 * refused.  Note the shared exit labels: the success path falls through
 * from out: into out_err:, so every exit marks mem as flushed.
 */
static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	/* Empty request: trivially successful. */
	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	/* Refuse to remap PTEs that cover stolen memory. */
	if (pg_start < intel_private.gtt_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
			   pg_start, intel_private.gtt_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	/* Range must fit inside the aperture. */
	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i915 can't check the GTT for entries since it's read only;
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	/* Only untyped, physical and cached memory types are accepted. */
	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	intel_agp_insert_sg_entries(mem, pg_start, mask_type);

 out:
	ret = 0;
 out_err:
	mem->is_flushed = true;
	return ret;
}
1167
1168 static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
1169                                      int type)
1170 {
1171         int i;
1172
1173         if (mem->page_count == 0)
1174                 return 0;
1175
1176         if (pg_start < intel_private.gtt_entries) {
1177                 dev_info(&intel_private.pcidev->dev,
1178                          "trying to disable local/stolen memory\n");
1179                 return -EINVAL;
1180         }
1181
1182         for (i = pg_start; i < (mem->page_count + pg_start); i++)
1183                 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1184
1185         readl(intel_private.gtt+i-1);
1186
1187         return 0;
1188 }
1189
1190 /* Return the aperture size by just checking the resource length.  The effect
1191  * described in the spec of the MSAC registers is just changing of the
1192  * resource size.
1193  */
1194 static int intel_i9xx_fetch_size(void)
1195 {
1196         int num_sizes = ARRAY_SIZE(intel_i830_sizes);
1197         int aper_size; /* size in megabytes */
1198         int i;
1199
1200         aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
1201
1202         for (i = 0; i < num_sizes; i++) {
1203                 if (aper_size == intel_i830_sizes[i].size) {
1204                         agp_bridge->current_size = intel_i830_sizes + i;
1205                         agp_bridge->previous_size = agp_bridge->current_size;
1206                         return aper_size;
1207                 }
1208         }
1209
1210         return 0;
1211 }
1212
1213 /* The intel i915 automatically initializes the agp aperture during POST.
1214  * Use the memory already set aside for in the GTT.
1215  */
1216 static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
1217 {
1218         int page_order;
1219         struct aper_size_info_fixed *size;
1220         int num_entries;
1221         u32 temp, temp2;
1222         int gtt_map_size = 256 * 1024;
1223
1224         size = agp_bridge->current_size;
1225         page_order = size->page_order;
1226         num_entries = size->num_entries;
1227         agp_bridge->gatt_table_real = NULL;
1228
1229         pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1230         pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
1231
1232         if (IS_G33)
1233             gtt_map_size = 1024 * 1024; /* 1M on G33 */
1234         intel_private.gtt = ioremap(temp2, gtt_map_size);
1235         if (!intel_private.gtt)
1236                 return -ENOMEM;
1237
1238         intel_private.gtt_total_size = gtt_map_size / 4;
1239
1240         temp &= 0xfff80000;
1241
1242         intel_private.registers = ioremap(temp, 128 * 4096);
1243         if (!intel_private.registers) {
1244                 iounmap(intel_private.gtt);
1245                 return -ENOMEM;
1246         }
1247
1248         temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1249         global_cache_flush();   /* FIXME: ? */
1250
1251         /* we have to call this as early as possible after the MMIO base address is known */
1252         intel_i830_init_gtt_entries();
1253
1254         agp_bridge->gatt_table = NULL;
1255
1256         agp_bridge->gatt_bus_addr = temp;
1257
1258         return 0;
1259 }
1260
1261 /*
1262  * The i965 supports 36-bit physical addresses, but to keep
1263  * the format of the GTT the same, the bits that don't fit
1264  * in a 32-bit word are shifted down to bits 4..7.
1265  *
1266  * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
1267  * is always zero on 32-bit architectures, so no need to make
1268  * this conditional.
1269  */
1270 static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
1271                                             dma_addr_t addr, int type)
1272 {
1273         /* Shift high bits down */
1274         addr |= (addr >> 28) & 0xf0;
1275
1276         /* Type checking must be done elsewhere */
1277         return addr | bridge->driver->masks[type].mask;
1278 }
1279
1280 static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1281 {
1282         u16 snb_gmch_ctl;
1283
1284         switch (agp_bridge->dev->device) {
1285         case PCI_DEVICE_ID_INTEL_GM45_HB:
1286         case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
1287         case PCI_DEVICE_ID_INTEL_Q45_HB:
1288         case PCI_DEVICE_ID_INTEL_G45_HB:
1289         case PCI_DEVICE_ID_INTEL_G41_HB:
1290         case PCI_DEVICE_ID_INTEL_B43_HB:
1291         case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
1292         case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
1293         case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
1294         case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
1295                 *gtt_offset = *gtt_size = MB(2);
1296                 break;
1297         case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
1298         case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
1299                 *gtt_offset = MB(2);
1300
1301                 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1302                 switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
1303                 default:
1304                 case SNB_GTT_SIZE_0M:
1305                         printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
1306                         *gtt_size = MB(0);
1307                         break;
1308                 case SNB_GTT_SIZE_1M:
1309                         *gtt_size = MB(1);
1310                         break;
1311                 case SNB_GTT_SIZE_2M:
1312                         *gtt_size = MB(2);
1313                         break;
1314                 }
1315                 break;
1316         default:
1317                 *gtt_offset = *gtt_size = KB(512);
1318         }
1319 }
1320
1321 /* The intel i965 automatically initializes the agp aperture during POST.
1322  * Use the memory already set aside for in the GTT.
1323  */
1324 static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
1325 {
1326         int page_order;
1327         struct aper_size_info_fixed *size;
1328         int num_entries;
1329         u32 temp;
1330         int gtt_offset, gtt_size;
1331
1332         size = agp_bridge->current_size;
1333         page_order = size->page_order;
1334         num_entries = size->num_entries;
1335         agp_bridge->gatt_table_real = NULL;
1336
1337         pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1338
1339         temp &= 0xfff00000;
1340
1341         intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
1342
1343         intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
1344
1345         if (!intel_private.gtt)
1346                 return -ENOMEM;
1347
1348         intel_private.gtt_total_size = gtt_size / 4;
1349
1350         intel_private.registers = ioremap(temp, 128 * 4096);
1351         if (!intel_private.registers) {
1352                 iounmap(intel_private.gtt);
1353                 return -ENOMEM;
1354         }
1355
1356         temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1357         global_cache_flush();   /* FIXME: ? */
1358
1359         /* we have to call this as early as possible after the MMIO base address is known */
1360         intel_i830_init_gtt_entries();
1361
1362         agp_bridge->gatt_table = NULL;
1363
1364         agp_bridge->gatt_bus_addr = temp;
1365
1366         return 0;
1367 }
1368
/* Bridge-driver vtable for i810-class parts: i810-specific configure,
 * fetch_size, insert/remove and alloc hooks, with generic gatt-table
 * handling from the AGP core.
 */
static const struct agp_bridge_driver intel_810_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i810_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 2,
	.needs_scratch_page	= true,
	.configure		= intel_i810_configure,
	.fetch_size		= intel_i810_fetch_size,
	.cleanup		= intel_i810_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= intel_i810_insert_entries,
	.remove_memory		= intel_i810_remove_entries,
	.alloc_by_type		= intel_i810_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
};
1394
/* Bridge-driver vtable for i830-class parts: BIOS-provided gatt table
 * (see intel_i830_create_gatt_table) and the 1KB flush-page chipset
 * flush.
 */
static const struct agp_bridge_driver intel_830_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i830_configure,
	.fetch_size		= intel_i830_fetch_size,
	.cleanup		= intel_i830_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i830_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i830_insert_entries,
	.remove_memory		= intel_i830_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type  = intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i830_chipset_flush,
};
1421
/* Bridge-driver vtable for i915-class parts: separately-mapped GTT,
 * isoch-flush-page chipset flush, and (with CONFIG_DMAR) the PCI DMA
 * API mapping hooks.
 */
static const struct agp_bridge_driver intel_915_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i915_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i915_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type  = intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
1454
/* Bridge-driver vtable for i965-class parts: i915 hooks plus the
 * 36-bit-aware mask_memory and the i965 gatt-table setup.
 */
static const struct agp_bridge_driver intel_i965_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i915_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i965_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i965_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages        = agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages      = agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
1487
1488 static const struct agp_bridge_driver intel_g33_driver = {
1489         .owner                  = THIS_MODULE,
1490         .aperture_sizes         = intel_i830_sizes,
1491         .size_type              = FIXED_APER_SIZE,
1492         .num_aperture_sizes     = 4,
1493         .needs_scratch_page     = true,
1494         .configure              = intel_i915_configure,
1495         .fetch_size             = intel_i9xx_fetch_size,
1496         .cleanup                = intel_i915_cleanup,
1497         .mask_memory            = intel_i965_mask_memory,
1498         .masks                  = intel_i810_masks,
1499         .agp_enable             = intel_i810_agp_enable,
1500         .cache_flush            = global_cache_flush,
1501         .create_gatt_table      = intel_i915_create_gatt_table,
1502         .free_gatt_table        = intel_i830_free_gatt_table,
1503         .insert_memory          = intel_i915_insert_entries,
1504         .remove_memory          = intel_i915_remove_entries,
1505         .alloc_by_type          = intel_i830_alloc_by_type,
1506         .free_by_type           = intel_i810_free_by_type,
1507         .agp_alloc_page         = agp_generic_alloc_page,
1508         .agp_alloc_pages        = agp_generic_alloc_pages,
1509         .agp_destroy_page       = agp_generic_destroy_page,
1510         .agp_destroy_pages      = agp_generic_destroy_pages,
1511         .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
1512         .chipset_flush          = intel_i915_chipset_flush,
1513 #ifdef USE_PCI_DMA_API
1514         .agp_map_page           = intel_agp_map_page,
1515         .agp_unmap_page         = intel_agp_unmap_page,
1516         .agp_map_memory         = intel_agp_map_memory,
1517         .agp_unmap_memory       = intel_agp_unmap_memory,
1518 #endif
1519 };