drm/radeon/kms: add initial Evergreen support (Radeon HD 5xxx)
drivers/gpu/drm/radeon/radeon_device.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"

/*
 * Clear GPU surface registers.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
        /* FIXME: check this out */
        if (rdev->family < CHIP_R600) {
                int i;

                for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
                        if (rdev->surface_regs[i].bo)
                                radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
                        else
                                radeon_clear_surface_reg(rdev, i);
                }
                /* enable surfaces */
                WREG32(RADEON_SURFACE_CNTL, 0);
        }
}

/*
 * GPU scratch register helper functions.
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
        int i;

        /* FIXME: check this out */
        if (rdev->family < CHIP_R300) {
                rdev->scratch.num_reg = 5;
        } else {
                rdev->scratch.num_reg = 7;
        }
        for (i = 0; i < rdev->scratch.num_reg; i++) {
                rdev->scratch.free[i] = true;
                rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4);
        }
}

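/*
 * Allocate a free scratch register and return its MMIO offset in @reg.
 * Returns 0 on success, or -EINVAL when every scratch register is in use.
 */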
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
        int i;

        for (i = 0; i < rdev->scratch.num_reg; i++) {
                if (rdev->scratch.free[i]) {
                        rdev->scratch.free[i] = false;
                        *reg = rdev->scratch.reg[i];
                        return 0;
                }
        }
        return -EINVAL;
}

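/*
 * Return a scratch register obtained from radeon_scratch_get() to the
 * free pool.
 */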
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
        int i;

        for (i = 0; i < rdev->scratch.num_reg; i++) {
                if (rdev->scratch.reg[i] == reg) {
                        rdev->scratch.free[i] = true;
                        return;
                }
        }
}

/*
 * MC common functions
 */
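/*
 * Place VRAM and GTT in the GPU address space.  Whichever of the two has
 * already been located is kept, and the other is aligned to its own size
 * and placed before or after it so that the two ranges never overlap.
 */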
int radeon_mc_setup(struct radeon_device *rdev)
{
        uint32_t tmp;

        /* Some chips have an "issue" with the memory controller, the
         * location must be aligned to the size. We just align it down,
         * too bad if we walk over the top of system memory, we don't
         * use DMA without a remapping anyway.
         * Affected chips are rv280, all r3xx, and all r4xx, but not IGP.
         */
        /* FGLRX seems to set it up like this: VRAM at 0, then GART.
         */
        /*
         * Note: from R6xx the address space is 40 bits but here we only
         * use 32 bits (still have to see a card which would exhaust 4G
         * address space).
         */
        if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
                /* vram location was already set up, try to put gtt after
                 * it if it fits */
                tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
                tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
                if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
                        rdev->mc.gtt_location = tmp;
                } else {
                        if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
                                printk(KERN_ERR "[drm] GTT too big to fit "
                                       "before or after vram location.\n");
                                return -EINVAL;
                        }
                        rdev->mc.gtt_location = 0;
                }
        } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
                /* gtt location was already set up, try to put vram before
                 * it if it fits */
                if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) {
                        rdev->mc.vram_location = 0;
                } else {
                        tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
                        tmp += (rdev->mc.mc_vram_size - 1);
                        tmp &= ~(rdev->mc.mc_vram_size - 1);
                        if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) {
                                rdev->mc.vram_location = tmp;
                        } else {
                                printk(KERN_ERR "[drm] vram too big to fit "
                                       "before or after GTT location.\n");
                                return -EINVAL;
                        }
                }
        } else {
                rdev->mc.vram_location = 0;
                tmp = rdev->mc.mc_vram_size;
                tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
                rdev->mc.gtt_location = tmp;
        }
        rdev->mc.vram_start = rdev->mc.vram_location;
        rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
        rdev->mc.gtt_start = rdev->mc.gtt_location;
        rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
        DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20));
        DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
                 (unsigned)rdev->mc.vram_location,
                 (unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1));
        DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20));
        DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
                 (unsigned)rdev->mc.gtt_location,
                 (unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1));
        return 0;
}


/*
 * GPU helper functions.
 */
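/*
 * Check whether the GPU has already been POSTed (initialized by the
 * VBIOS): first look for an enabled CRTC, then fall back to checking
 * whether the memory controller reports a non-zero memory size.
 */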
bool radeon_card_posted(struct radeon_device *rdev)
{
        uint32_t reg;

        /* first check CRTCs */
        if (ASIC_IS_DCE4(rdev)) {
                reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
                if (reg & EVERGREEN_CRTC_MASTER_EN)
                        return true;
        } else if (ASIC_IS_AVIVO(rdev)) {
                reg = RREG32(AVIVO_D1CRTC_CONTROL) |
                      RREG32(AVIVO_D2CRTC_CONTROL);
                if (reg & AVIVO_CRTC_EN) {
                        return true;
                }
        } else {
                reg = RREG32(RADEON_CRTC_GEN_CNTL) |
                      RREG32(RADEON_CRTC2_GEN_CNTL);
                if (reg & RADEON_CRTC_EN) {
                        return true;
                }
        }

        /* then check MEM_SIZE, in case the crtcs are off */
        if (rdev->family >= CHIP_R600)
                reg = RREG32(R600_CONFIG_MEMSIZE);
        else
                reg = RREG32(RADEON_CONFIG_MEMSIZE);

        if (reg)
                return true;

        return false;
}

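/*
 * POST the card with the ATOM or COMBIOS tables if the VBIOS has not
 * already done so.  Returns false only when the card is not posted and
 * no BIOS image is available to post it with.
 */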
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
        if (radeon_card_posted(rdev))
                return true;

        if (rdev->bios) {
                DRM_INFO("GPU not posted. posting now...\n");
                if (rdev->is_atom_bios)
                        atom_asic_init(rdev->mode_info.atom_context);
                else
                        radeon_combios_asic_init(rdev->ddev);
                return true;
        } else {
                dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
                return false;
        }
}

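/*
 * Allocate a zeroed page from the DMA32 zone and map it for device DMA.
 * radeon_dummy_page_fini() unmaps and frees it again.
 */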
int radeon_dummy_page_init(struct radeon_device *rdev)
{
        rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
        if (rdev->dummy_page.page == NULL)
                return -ENOMEM;
        rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
                                        0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (!rdev->dummy_page.addr) {
                __free_page(rdev->dummy_page.page);
                rdev->dummy_page.page = NULL;
                return -ENOMEM;
        }
        return 0;
}

void radeon_dummy_page_fini(struct radeon_device *rdev)
{
        if (rdev->dummy_page.page == NULL)
                return;
        pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
                        PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        __free_page(rdev->dummy_page.page);
        rdev->dummy_page.page = NULL;
}


/*
 * Register accessor functions.
 */
uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
        BUG_ON(1);
        return 0;
}

void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
                  reg, v);
        BUG_ON(1);
}

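/*
 * Install the MC, PLL and PCIE port register accessors that match the
 * ASIC family.  The accessors start out pointing at the invalid stubs
 * above and are overridden from oldest to newest family.
 */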
void radeon_register_accessor_init(struct radeon_device *rdev)
{
        rdev->mc_rreg = &radeon_invalid_rreg;
        rdev->mc_wreg = &radeon_invalid_wreg;
        rdev->pll_rreg = &radeon_invalid_rreg;
        rdev->pll_wreg = &radeon_invalid_wreg;
        rdev->pciep_rreg = &radeon_invalid_rreg;
        rdev->pciep_wreg = &radeon_invalid_wreg;

        /* Don't change the order, as we are overriding accessors. */
        if (rdev->family < CHIP_RV515) {
                rdev->pcie_reg_mask = 0xff;
        } else {
                rdev->pcie_reg_mask = 0x7ff;
        }
        /* FIXME: not sure here */
        if (rdev->family <= CHIP_R580) {
                rdev->pll_rreg = &r100_pll_rreg;
                rdev->pll_wreg = &r100_pll_wreg;
        }
        if (rdev->family >= CHIP_R420) {
                rdev->mc_rreg = &r420_mc_rreg;
                rdev->mc_wreg = &r420_mc_wreg;
        }
        if (rdev->family >= CHIP_RV515) {
                rdev->mc_rreg = &rv515_mc_rreg;
                rdev->mc_wreg = &rv515_mc_wreg;
        }
        if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
                rdev->mc_rreg = &rs400_mc_rreg;
                rdev->mc_wreg = &rs400_mc_wreg;
        }
        if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
                rdev->mc_rreg = &rs690_mc_rreg;
                rdev->mc_wreg = &rs690_mc_wreg;
        }
        if (rdev->family == CHIP_RS600) {
                rdev->mc_rreg = &rs600_mc_rreg;
                rdev->mc_wreg = &rs600_mc_wreg;
        }
        if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
                rdev->pciep_rreg = &r600_pciep_rreg;
                rdev->pciep_wreg = &r600_pciep_wreg;
        }
}


/*
 * ASIC
 */
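/*
 * Select the per-family ASIC function table.  The Evergreen (DCE4)
 * families CEDAR through HEMLOCK map to the new evergreen_asic table;
 * unknown families are rejected with -EINVAL.
 */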
int radeon_asic_init(struct radeon_device *rdev)
{
        radeon_register_accessor_init(rdev);
        switch (rdev->family) {
        case CHIP_R100:
        case CHIP_RV100:
        case CHIP_RS100:
        case CHIP_RV200:
        case CHIP_RS200:
        case CHIP_R200:
        case CHIP_RV250:
        case CHIP_RS300:
        case CHIP_RV280:
                rdev->asic = &r100_asic;
                break;
        case CHIP_R300:
        case CHIP_R350:
        case CHIP_RV350:
        case CHIP_RV380:
                rdev->asic = &r300_asic;
                if (rdev->flags & RADEON_IS_PCIE) {
                        rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
                        rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
                }
                break;
        case CHIP_R420:
        case CHIP_R423:
        case CHIP_RV410:
                rdev->asic = &r420_asic;
                break;
        case CHIP_RS400:
        case CHIP_RS480:
                rdev->asic = &rs400_asic;
                break;
        case CHIP_RS600:
                rdev->asic = &rs600_asic;
                break;
        case CHIP_RS690:
        case CHIP_RS740:
                rdev->asic = &rs690_asic;
                break;
        case CHIP_RV515:
                rdev->asic = &rv515_asic;
                break;
        case CHIP_R520:
        case CHIP_RV530:
        case CHIP_RV560:
        case CHIP_RV570:
        case CHIP_R580:
                rdev->asic = &r520_asic;
                break;
        case CHIP_R600:
        case CHIP_RV610:
        case CHIP_RV630:
        case CHIP_RV620:
        case CHIP_RV635:
        case CHIP_RV670:
        case CHIP_RS780:
        case CHIP_RS880:
                rdev->asic = &r600_asic;
                break;
        case CHIP_RV770:
        case CHIP_RV730:
        case CHIP_RV710:
        case CHIP_RV740:
                rdev->asic = &rv770_asic;
                break;
        case CHIP_CEDAR:
        case CHIP_REDWOOD:
        case CHIP_JUNIPER:
        case CHIP_CYPRESS:
        case CHIP_HEMLOCK:
                rdev->asic = &evergreen_asic;
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        if (rdev->flags & RADEON_IS_IGP) {
                rdev->asic->get_memory_clock = NULL;
                rdev->asic->set_memory_clock = NULL;
        }

        return 0;
}


/*
 * Wrapper around modesetting bits.
 */
int radeon_clocks_init(struct radeon_device *rdev)
{
        int r;

        r = radeon_static_clocks_init(rdev->ddev);
        if (r) {
                return r;
        }
        DRM_INFO("Clocks initialized!\n");
        return 0;
}

void radeon_clocks_fini(struct radeon_device *rdev)
{
}

/* ATOM accessor methods */
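/*
 * The cail_* helpers below are handed to the ATOM BIOS interpreter via
 * struct card_info (see radeon_atombios_init()) so that the BIOS tables
 * can read and write MMIO, MC and PLL registers through the driver.
 */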
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
        struct radeon_device *rdev = info->dev->dev_private;
        uint32_t r;

        r = rdev->pll_rreg(rdev, reg);
        return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct radeon_device *rdev = info->dev->dev_private;

        rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
        struct radeon_device *rdev = info->dev->dev_private;
        uint32_t r;

        r = rdev->mc_rreg(rdev, reg);
        return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct radeon_device *rdev = info->dev->dev_private;

        rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct radeon_device *rdev = info->dev->dev_private;

        WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
        struct radeon_device *rdev = info->dev->dev_private;
        uint32_t r;

        r = RREG32(reg*4);
        return r;
}

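/*
 * Set up the ATOM BIOS interpreter: allocate the card_info callback
 * table, parse the BIOS image and initialize the ATOM scratch registers.
 */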
int radeon_atombios_init(struct radeon_device *rdev)
{
        struct card_info *atom_card_info =
            kzalloc(sizeof(struct card_info), GFP_KERNEL);

        if (!atom_card_info)
                return -ENOMEM;

        rdev->mode_info.atom_card_info = atom_card_info;
        atom_card_info->dev = rdev->ddev;
        atom_card_info->reg_read = cail_reg_read;
        atom_card_info->reg_write = cail_reg_write;
        atom_card_info->mc_read = cail_mc_read;
        atom_card_info->mc_write = cail_mc_write;
        atom_card_info->pll_read = cail_pll_read;
        atom_card_info->pll_write = cail_pll_write;

        rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
        mutex_init(&rdev->mode_info.atom_context->mutex);
        radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
        atom_allocate_fb_scratch(rdev->mode_info.atom_context);
        return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
        if (rdev->mode_info.atom_context) {
                kfree(rdev->mode_info.atom_context->scratch);
                kfree(rdev->mode_info.atom_context);
        }
        kfree(rdev->mode_info.atom_card_info);
}

int radeon_combios_init(struct radeon_device *rdev)
{
        radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
        return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
        struct radeon_device *rdev = cookie;
        radeon_vga_set_state(rdev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

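/*
 * Drop the AGP flag and fall back to the native PCIE or PCI GART,
 * depending on what the ASIC family supports.
 */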
void radeon_agp_disable(struct radeon_device *rdev)
{
        rdev->flags &= ~RADEON_IS_AGP;
        if (rdev->family >= CHIP_R600) {
                DRM_INFO("Forcing AGP to PCIE mode\n");
                rdev->flags |= RADEON_IS_PCIE;
        } else if (rdev->family >= CHIP_RV515 ||
                        rdev->family == CHIP_RV380 ||
                        rdev->family == CHIP_RV410 ||
                        rdev->family == CHIP_R423) {
                DRM_INFO("Forcing AGP to PCIE mode\n");
                rdev->flags |= RADEON_IS_PCIE;
                rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
                rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
        } else {
                DRM_INFO("Forcing AGP to PCI mode\n");
                rdev->flags |= RADEON_IS_PCI;
                rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
                rdev->asic->gart_set_page = &r100_pci_gart_set_page;
        }
        rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
}

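/*
 * Validate the module parameters: the VRAM limit and GART size must be
 * powers of two (GART at least 32M), and the AGP mode must be one of
 * -1, 0, 1, 2, 4 or 8.  Invalid values are reset to safe defaults.
 */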
void radeon_check_arguments(struct radeon_device *rdev)
{
        /* vram limit must be a power of two */
        switch (radeon_vram_limit) {
        case 0:
        case 4:
        case 8:
        case 16:
        case 32:
        case 64:
        case 128:
        case 256:
        case 512:
        case 1024:
        case 2048:
        case 4096:
                break;
        default:
                dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
                                radeon_vram_limit);
                radeon_vram_limit = 0;
                break;
        }
        radeon_vram_limit = radeon_vram_limit << 20;
        /* gart size must be a power of two and greater than or equal to 32M */
        switch (radeon_gart_size) {
        case 4:
        case 8:
        case 16:
                dev_warn(rdev->dev, "gart size (%d) too small, forcing to 512M\n",
                                radeon_gart_size);
                radeon_gart_size = 512;
                break;
        case 32:
        case 64:
        case 128:
        case 256:
        case 512:
        case 1024:
        case 2048:
        case 4096:
                break;
        default:
                dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
                                radeon_gart_size);
                radeon_gart_size = 512;
                break;
        }
        rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
        /* AGP mode can only be -1, 0, 1, 2, 4, 8 */
        switch (radeon_agpmode) {
        case -1:
        case 0:
        case 1:
        case 2:
        case 4:
        case 8:
                break;
        default:
                dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
                                "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
                radeon_agpmode = 0;
                break;
        }
}

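/*
 * One-time device initialization: record the PCI/DRM handles and flags,
 * set up locks and the work queue, pick the ASIC function table, program
 * the DMA mask, map the MMIO registers, register with the VGA arbiter
 * and finally call the ASIC specific radeon_init().  If acceleration
 * fails to come up on an AGP card, the init is retried with AGP disabled.
 */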
int radeon_device_init(struct radeon_device *rdev,
                       struct drm_device *ddev,
                       struct pci_dev *pdev,
                       uint32_t flags)
{
        int r;
        int dma_bits;

        DRM_INFO("radeon: Initializing kernel modesetting.\n");
        rdev->shutdown = false;
        rdev->dev = &pdev->dev;
        rdev->ddev = ddev;
        rdev->pdev = pdev;
        rdev->flags = flags;
        rdev->family = flags & RADEON_FAMILY_MASK;
        rdev->is_atom_bios = false;
        rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
        rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
        rdev->gpu_lockup = false;
        rdev->accel_working = false;
        /* mutex initializations are all done here so we
         * can call these functions again without locking issues */
        mutex_init(&rdev->cs_mutex);
        mutex_init(&rdev->ib_pool.mutex);
        mutex_init(&rdev->cp.mutex);
        mutex_init(&rdev->dc_hw_i2c_mutex);
        if (rdev->family >= CHIP_R600)
                spin_lock_init(&rdev->ih.lock);
        mutex_init(&rdev->gem.mutex);
        mutex_init(&rdev->pm.mutex);
        rwlock_init(&rdev->fence_drv.lock);
        INIT_LIST_HEAD(&rdev->gem.objects);
        init_waitqueue_head(&rdev->irq.vblank_queue);

        /* setup workqueue */
        rdev->wq = create_workqueue("radeon");
        if (rdev->wq == NULL)
                return -ENOMEM;

        /* Set asic functions */
        r = radeon_asic_init(rdev);
        if (r)
                return r;
        radeon_check_arguments(rdev);

        if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
                radeon_agp_disable(rdev);
        }

        /* set DMA mask + need_dma32 flags.
         * PCIE - can handle 40-bits.
         * IGP - can handle 40-bits (in theory)
         * AGP - generally dma32 is safest
         * PCI - only dma32
         */
        rdev->need_dma32 = false;
        if (rdev->flags & RADEON_IS_AGP)
                rdev->need_dma32 = true;
        if (rdev->flags & RADEON_IS_PCI)
                rdev->need_dma32 = true;

        dma_bits = rdev->need_dma32 ? 32 : 40;
        r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
        if (r) {
                printk(KERN_WARNING "radeon: No suitable DMA available.\n");
        }

        /* Registers mapping */
        /* TODO: block userspace mapping of io register */
        rdev->rmmio_base = drm_get_resource_start(rdev->ddev, 2);
        rdev->rmmio_size = drm_get_resource_len(rdev->ddev, 2);
        rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
        if (rdev->rmmio == NULL) {
                return -ENOMEM;
        }
        DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
        DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

        /* if we have > 1 VGA card, then disable the radeon VGA resources */
        /* this will fail for cards that aren't VGA class devices, just
         * ignore it */
        vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

        r = radeon_init(rdev);
        if (r)
                return r;

        if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
                /* Acceleration not working on AGP card, try again
                 * with fallback to PCI or PCIE GART
                 */
                radeon_gpu_reset(rdev);
                radeon_fini(rdev);
                radeon_agp_disable(rdev);
                r = radeon_init(rdev);
                if (r)
                        return r;
        }
        if (radeon_testing) {
                radeon_test_moves(rdev);
        }
        if (radeon_benchmarking) {
                radeon_benchmark(rdev);
        }
        return 0;
}

void radeon_device_fini(struct radeon_device *rdev)
{
        DRM_INFO("radeon: finishing device.\n");
        rdev->shutdown = true;
        radeon_fini(rdev);
        destroy_workqueue(rdev->wq);
        vga_client_register(rdev->pdev, NULL, NULL, NULL);
        iounmap(rdev->rmmio);
        rdev->rmmio = NULL;
}


/*
 * Suspend & resume.
 */
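/*
 * Suspend path: unpin the scanout buffers, evict everything from VRAM,
 * wait for the GPU to go idle, save the BIOS scratch registers and then
 * suspend the ASIC.  On a real suspend the PCI device is put into D3hot.
 */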
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
        struct radeon_device *rdev;
        struct drm_crtc *crtc;
        int r;

        if (dev == NULL || dev->dev_private == NULL) {
                return -ENODEV;
        }
        if (state.event == PM_EVENT_PRETHAW) {
                return 0;
        }
        rdev = dev->dev_private;

        /* unpin the front buffers */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
                struct radeon_bo *robj;

                if (rfb == NULL || rfb->obj == NULL) {
                        continue;
                }
                robj = rfb->obj->driver_private;
                if (robj != rdev->fbdev_rbo) {
                        r = radeon_bo_reserve(robj, false);
                        if (unlikely(r == 0)) {
                                radeon_bo_unpin(robj);
                                radeon_bo_unreserve(robj);
                        }
                }
        }
        /* evict vram memory */
        radeon_bo_evict_vram(rdev);
        /* wait for gpu to finish processing current batch */
        radeon_fence_wait_last(rdev);

        radeon_save_bios_scratch_regs(rdev);

        radeon_suspend(rdev);
        radeon_hpd_fini(rdev);
        /* evict remaining vram memory */
        radeon_bo_evict_vram(rdev);

        pci_save_state(dev->pdev);
        if (state.event == PM_EVENT_SUSPEND) {
                /* Shut down the device */
                pci_disable_device(dev->pdev);
                pci_set_power_state(dev->pdev, PCI_D3hot);
        }
        acquire_console_sem();
        fb_set_suspend(rdev->fbdev_info, 1);
        release_console_sem();
        return 0;
}

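/*
 * Resume path: power the PCI device back up, resume the ASIC, restore
 * the BIOS scratch registers, re-enable the framebuffer console, reset
 * the hotplug detect state and force the modes back onto the CRTCs.
 */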
int radeon_resume_kms(struct drm_device *dev)
{
        struct radeon_device *rdev = dev->dev_private;

        acquire_console_sem();
        pci_set_power_state(dev->pdev, PCI_D0);
        pci_restore_state(dev->pdev);
        if (pci_enable_device(dev->pdev)) {
                release_console_sem();
                return -1;
        }
        pci_set_master(dev->pdev);
        /* resume AGP if in use */
        radeon_agp_resume(rdev);
        radeon_resume(rdev);
        radeon_restore_bios_scratch_regs(rdev);
        fb_set_suspend(rdev->fbdev_info, 0);
        release_console_sem();

        /* reset hpd state */
        radeon_hpd_init(rdev);
        /* blat the mode back in */
        drm_helper_resume_force_mode(dev);
        return 0;
}


/*
 * Debugfs
 */
struct radeon_debugfs {
        struct drm_info_list    *files;
        unsigned                num_files;
};
static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
static unsigned _radeon_debugfs_count = 0;

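/*
 * Register a set of debugfs files on both the control and primary DRM
 * minors.  Duplicate registrations are ignored, and the total number of
 * file sets is capped at RADEON_DEBUGFS_MAX_NUM_FILES.
 */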
int radeon_debugfs_add_files(struct radeon_device *rdev,
                             struct drm_info_list *files,
                             unsigned nfiles)
{
        unsigned i;

        for (i = 0; i < _radeon_debugfs_count; i++) {
                if (_radeon_debugfs[i].files == files) {
                        /* Already registered */
                        return 0;
                }
        }
        if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
                DRM_ERROR("Reached maximum number of debugfs files.\n");
                DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
                return -EINVAL;
        }
        _radeon_debugfs[_radeon_debugfs_count].files = files;
        _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
        _radeon_debugfs_count++;
#if defined(CONFIG_DEBUG_FS)
        drm_debugfs_create_files(files, nfiles,
                                 rdev->ddev->control->debugfs_root,
                                 rdev->ddev->control);
        drm_debugfs_create_files(files, nfiles,
                                 rdev->ddev->primary->debugfs_root,
                                 rdev->ddev->primary);
#endif
        return 0;
}

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
        return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
        unsigned i;

        for (i = 0; i < _radeon_debugfs_count; i++) {
                drm_debugfs_remove_files(_radeon_debugfs[i].files,
                                         _radeon_debugfs[i].num_files, minor);
        }
}
#endif