drm/radeon/kms: add r600 KMS support
drivers/gpu/drm/radeon/radeon_device.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"

/*
 * Clear GPU surface registers.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
        /* FIXME: check this out */
        if (rdev->family < CHIP_R600) {
                int i;

                for (i = 0; i < 8; i++) {
                        WREG32(RADEON_SURFACE0_INFO +
                               i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
                               0);
                }
                /* enable surfaces */
                WREG32(RADEON_SURFACE_CNTL, 0);
        }
}
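
/*
 * Background (assumption, not stated in this file): on these pre-R600 parts
 * the SURFACEn_INFO registers and RADEON_SURFACE_CNTL describe tiling/swap
 * apertures that the BIOS or a previous driver may have left programmed;
 * zeroing them puts the whole aperture back into plain linear access before
 * the KMS driver sets up its own surfaces.
 */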

/*
 * GPU scratch register helper functions.
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
        int i;

        /* FIXME: check this out */
        if (rdev->family < CHIP_R300) {
                rdev->scratch.num_reg = 5;
        } else {
                rdev->scratch.num_reg = 7;
        }
        for (i = 0; i < rdev->scratch.num_reg; i++) {
                rdev->scratch.free[i] = true;
                rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4);
        }
}

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
        int i;

        for (i = 0; i < rdev->scratch.num_reg; i++) {
                if (rdev->scratch.free[i]) {
                        rdev->scratch.free[i] = false;
                        *reg = rdev->scratch.reg[i];
                        return 0;
                }
        }
        return -EINVAL;
}

void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
        int i;

        for (i = 0; i < rdev->scratch.num_reg; i++) {
                if (rdev->scratch.reg[i] == reg) {
                        rdev->scratch.free[i] = true;
                        return;
                }
        }
}
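
/*
 * Illustrative sketch (not quoted from the driver, for exposition only): a
 * typical caller grabs a scratch register, seeds it, has the ring write a
 * new value to it, and polls until the value changes or a timeout expires:
 *
 *      uint32_t scratch;
 *      if (radeon_scratch_get(rdev, &scratch) == 0) {
 *              WREG32(scratch, 0xCAFEDEAD);
 *              // ... emit a ring packet that writes to 'scratch', then
 *              // poll RREG32(scratch) up to rdev->usec_timeout ...
 *              radeon_scratch_free(rdev, scratch);
 *      }
 */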

/*
 * MC common functions
 */
int radeon_mc_setup(struct radeon_device *rdev)
{
        uint32_t tmp;

        /* Some chips have an "issue" with the memory controller, the
         * location must be aligned to the size. We just align it down,
         * too bad if we walk over the top of system memory, we don't
         * use DMA without a remapper anyway.
         * Affected chips are rv280, all r3xx, and all r4xx, but not IGP
         */
        /* FGLRX seems to set it up like this: VRAM at 0, then GART.
         */
        /*
         * Note: from R6xx the address space is 40 bits, but here we only
         * use 32 bits (we have yet to see a card that would exhaust the
         * 4G address space).
         */
        if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
                /* vram location was already set up, try to put gtt after
                 * it if it fits */
                tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
                tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
                if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
                        rdev->mc.gtt_location = tmp;
                } else {
                        if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
                                printk(KERN_ERR "[drm] GTT too big to fit "
                                       "before or after vram location.\n");
                                return -EINVAL;
                        }
                        rdev->mc.gtt_location = 0;
                }
        } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
                /* gtt location was already set up, try to put vram before
                 * it if it fits */
                if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) {
                        rdev->mc.vram_location = 0;
                } else {
                        tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
                        tmp += (rdev->mc.mc_vram_size - 1);
                        tmp &= ~(rdev->mc.mc_vram_size - 1);
                        if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) {
                                rdev->mc.vram_location = tmp;
                        } else {
                                printk(KERN_ERR "[drm] vram too big to fit "
                                       "before or after GTT location.\n");
                                return -EINVAL;
                        }
                }
        } else {
                rdev->mc.vram_location = 0;
                tmp = rdev->mc.mc_vram_size;
                tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
                rdev->mc.gtt_location = tmp;
        }
        DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20));
        DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
                 (unsigned)rdev->mc.vram_location,
                 (unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1));
        DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20));
        DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
                 (unsigned)rdev->mc.gtt_location,
                 (unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1));
        return 0;
}
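
/*
 * Worked example of the alignment above (hypothetical sizes, assuming
 * gtt_size is a power of two as the mask arithmetic requires):
 *
 *      vram_location = 0x00000000, mc_vram_size = 256M (0x10000000)
 *      gtt_size      = 512M (0x20000000)
 *
 *      tmp = 0x00000000 + 0x10000000          = 0x10000000
 *      tmp = (tmp + 0x1FFFFFFF) & ~0x1FFFFFFF = 0x20000000
 *
 * so the GTT lands at 512M, the first multiple of its own size at or above
 * the end of VRAM.
 */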


/*
 * GPU helper functions.
 */
static bool radeon_card_posted(struct radeon_device *rdev)
{
        uint32_t reg;

        /* first check CRTCs */
        if (ASIC_IS_AVIVO(rdev)) {
                reg = RREG32(AVIVO_D1CRTC_CONTROL) |
                      RREG32(AVIVO_D2CRTC_CONTROL);
                if (reg & AVIVO_CRTC_EN) {
                        return true;
                }
        } else {
                reg = RREG32(RADEON_CRTC_GEN_CNTL) |
                      RREG32(RADEON_CRTC2_GEN_CNTL);
                if (reg & RADEON_CRTC_EN) {
                        return true;
                }
        }

        /* then check MEM_SIZE, in case the crtcs are off */
        if (rdev->family >= CHIP_R600)
                reg = RREG32(R600_CONFIG_MEMSIZE);
        else
                reg = RREG32(RADEON_CONFIG_MEMSIZE);

        if (reg)
                return true;

        return false;
}
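
/*
 * Background (assumption): "posted" means the video BIOS has already run its
 * init tables on this card (e.g. it was the boot display). The check above
 * treats an enabled CRTC, or a non-zero programmed memory size, as evidence
 * of that; radeon_device_init() below re-posts the card via ATOM or combios
 * when neither is set.
 */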

int radeon_dummy_page_init(struct radeon_device *rdev)
{
        rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
        if (rdev->dummy_page.page == NULL)
                return -ENOMEM;
        rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
                                        0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (!rdev->dummy_page.addr) {
                __free_page(rdev->dummy_page.page);
                rdev->dummy_page.page = NULL;
                return -ENOMEM;
        }
        return 0;
}

void radeon_dummy_page_fini(struct radeon_device *rdev)
{
        if (rdev->dummy_page.page == NULL)
                return;
        pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
                        PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        __free_page(rdev->dummy_page.page);
        rdev->dummy_page.page = NULL;
}
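
/*
 * Background (assumption, not stated in this file): the zeroed, DMA-mapped
 * dummy page is what unbound GART entries can be pointed at, so a stray GPU
 * access through an empty GART slot hits a harmless page instead of random
 * system memory. GFP_DMA32 keeps it below 4G for chips limited to 32-bit DMA.
 */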


/*
 * Register accessor functions.
 */
uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
        BUG_ON(1);
        return 0;
}

void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
                  reg, v);
        BUG_ON(1);
}

void radeon_register_accessor_init(struct radeon_device *rdev)
{
        rdev->mc_rreg = &radeon_invalid_rreg;
        rdev->mc_wreg = &radeon_invalid_wreg;
        rdev->pll_rreg = &radeon_invalid_rreg;
        rdev->pll_wreg = &radeon_invalid_wreg;
        rdev->pciep_rreg = &radeon_invalid_rreg;
        rdev->pciep_wreg = &radeon_invalid_wreg;

        /* Don't change the order, as we are overriding accessors. */
        if (rdev->family < CHIP_RV515) {
                rdev->pcie_reg_mask = 0xff;
        } else {
                rdev->pcie_reg_mask = 0x7ff;
        }
        /* FIXME: not sure here */
        if (rdev->family <= CHIP_R580) {
                rdev->pll_rreg = &r100_pll_rreg;
                rdev->pll_wreg = &r100_pll_wreg;
        }
        if (rdev->family >= CHIP_RV515) {
                rdev->mc_rreg = &rv515_mc_rreg;
                rdev->mc_wreg = &rv515_mc_wreg;
        }
        if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
                rdev->mc_rreg = &rs400_mc_rreg;
                rdev->mc_wreg = &rs400_mc_wreg;
        }
        if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
                rdev->mc_rreg = &rs690_mc_rreg;
                rdev->mc_wreg = &rs690_mc_wreg;
        }
        if (rdev->family == CHIP_RS600) {
                rdev->mc_rreg = &rs600_mc_rreg;
                rdev->mc_wreg = &rs600_mc_wreg;
        }
        if (rdev->family >= CHIP_R600) {
                rdev->pciep_rreg = &r600_pciep_rreg;
                rdev->pciep_wreg = &r600_pciep_wreg;
        }
}
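
/*
 * Illustrative sketch (hypothetical names, for exposition only): callers do
 * not pick a per-family helper themselves, they go through the hooks set up
 * above, so the indirect register spaces look the same on every chip:
 *
 *      uint32_t v = rdev->mc_rreg(rdev, SOME_MC_REG);
 *      rdev->mc_wreg(rdev, SOME_MC_REG, v | SOME_BIT);
 *
 * SOME_MC_REG and SOME_BIT are placeholders, not real register names.
 */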


/*
 * ASIC
 */
int radeon_asic_init(struct radeon_device *rdev)
{
        radeon_register_accessor_init(rdev);
        switch (rdev->family) {
        case CHIP_R100:
        case CHIP_RV100:
        case CHIP_RS100:
        case CHIP_RV200:
        case CHIP_RS200:
        case CHIP_R200:
        case CHIP_RV250:
        case CHIP_RS300:
        case CHIP_RV280:
                rdev->asic = &r100_asic;
                break;
        case CHIP_R300:
        case CHIP_R350:
        case CHIP_RV350:
        case CHIP_RV380:
                rdev->asic = &r300_asic;
                break;
        case CHIP_R420:
        case CHIP_R423:
        case CHIP_RV410:
                rdev->asic = &r420_asic;
                break;
        case CHIP_RS400:
        case CHIP_RS480:
                rdev->asic = &rs400_asic;
                break;
        case CHIP_RS600:
                rdev->asic = &rs600_asic;
                break;
        case CHIP_RS690:
        case CHIP_RS740:
                rdev->asic = &rs690_asic;
                break;
        case CHIP_RV515:
                rdev->asic = &rv515_asic;
                break;
        case CHIP_R520:
        case CHIP_RV530:
        case CHIP_RV560:
        case CHIP_RV570:
        case CHIP_R580:
                rdev->asic = &r520_asic;
                break;
        case CHIP_R600:
        case CHIP_RV610:
        case CHIP_RV630:
        case CHIP_RV620:
        case CHIP_RV635:
        case CHIP_RV670:
        case CHIP_RS780:
        case CHIP_RS880:
                rdev->asic = &r600_asic;
                break;
        case CHIP_RV770:
        case CHIP_RV730:
        case CHIP_RV710:
        case CHIP_RV740:
                rdev->asic = &rv770_asic;
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }
        return 0;
}


/*
 * Wrappers around modesetting bits.
 */
int radeon_clocks_init(struct radeon_device *rdev)
{
        int r;

        radeon_get_clock_info(rdev->ddev);
        r = radeon_static_clocks_init(rdev->ddev);
        if (r) {
                return r;
        }
        DRM_INFO("Clocks initialized!\n");
        return 0;
}

void radeon_clocks_fini(struct radeon_device *rdev)
{
}

/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
        struct radeon_device *rdev = info->dev->dev_private;
        uint32_t r;

        r = rdev->pll_rreg(rdev, reg);
        return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct radeon_device *rdev = info->dev->dev_private;

        rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
        struct radeon_device *rdev = info->dev->dev_private;
        uint32_t r;

        r = rdev->mc_rreg(rdev, reg);
        return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct radeon_device *rdev = info->dev->dev_private;

        rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct radeon_device *rdev = info->dev->dev_private;

        WREG32(reg * 4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
        struct radeon_device *rdev = info->dev->dev_private;
        uint32_t r;

        r = RREG32(reg * 4);
        return r;
}

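/*
 * Background (assumption based on the multiply above): the ATOM interpreter
 * indexes MMIO registers in 32-bit words while WREG32/RREG32 take byte
 * offsets, hence the "reg * 4" in the two plain register callbacks. The PLL
 * and MC callbacks go through the per-family accessors installed by
 * radeon_register_accessor_init().
 */
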
static struct card_info atom_card_info = {
        .dev = NULL,
        .reg_read = cail_reg_read,
        .reg_write = cail_reg_write,
        .mc_read = cail_mc_read,
        .mc_write = cail_mc_write,
        .pll_read = cail_pll_read,
        .pll_write = cail_pll_write,
};

int radeon_atombios_init(struct radeon_device *rdev)
{
        atom_card_info.dev = rdev->ddev;
        rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios);
        radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
        return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
        kfree(rdev->mode_info.atom_context);
}

int radeon_combios_init(struct radeon_device *rdev)
{
        radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
        return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

int radeon_modeset_init(struct radeon_device *rdev);
void radeon_modeset_fini(struct radeon_device *rdev);


/*
 * Radeon device.
 */
int radeon_device_init(struct radeon_device *rdev,
                       struct drm_device *ddev,
                       struct pci_dev *pdev,
                       uint32_t flags)
{
        int r, ret = 0;
        int dma_bits;

        DRM_INFO("radeon: Initializing kernel modesetting.\n");
        rdev->shutdown = false;
        rdev->ddev = ddev;
        rdev->pdev = pdev;
        rdev->flags = flags;
        rdev->family = flags & RADEON_FAMILY_MASK;
        rdev->is_atom_bios = false;
        rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
        rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
        rdev->gpu_lockup = false;
        /* mutex initialization is all done here so we
         * can call these functions again without locking issues */
        mutex_init(&rdev->cs_mutex);
        mutex_init(&rdev->ib_pool.mutex);
        mutex_init(&rdev->cp.mutex);
        rwlock_init(&rdev->fence_drv.lock);

        if (radeon_agpmode == -1) {
                rdev->flags &= ~RADEON_IS_AGP;
                if (rdev->family > CHIP_RV515 ||
                    rdev->family == CHIP_RV380 ||
                    rdev->family == CHIP_RV410 ||
                    rdev->family == CHIP_R423) {
                        DRM_INFO("Forcing AGP to PCIE mode\n");
                        rdev->flags |= RADEON_IS_PCIE;
                } else {
                        DRM_INFO("Forcing AGP to PCI mode\n");
                        rdev->flags |= RADEON_IS_PCI;
                }
        }

        /* Set asic functions */
        r = radeon_asic_init(rdev);
        if (r) {
                return r;
        }

        /* set DMA mask + need_dma32 flags.
         * PCIE - can handle 40-bits.
         * IGP - can handle 40-bits (in theory)
         * AGP - generally dma32 is safest
         * PCI - only dma32
         */
        rdev->need_dma32 = false;
        if (rdev->flags & RADEON_IS_AGP)
                rdev->need_dma32 = true;
        if (rdev->flags & RADEON_IS_PCI)
                rdev->need_dma32 = true;

        dma_bits = rdev->need_dma32 ? 32 : 40;
        r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
        if (r) {
                printk(KERN_WARNING "radeon: No suitable DMA available.\n");
        }

        /* Registers mapping */
        /* TODO: block userspace mapping of io register */
        rdev->rmmio_base = drm_get_resource_start(rdev->ddev, 2);
        rdev->rmmio_size = drm_get_resource_len(rdev->ddev, 2);
        rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
        if (rdev->rmmio == NULL) {
                return -ENOMEM;
        }
        DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
        DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

        rdev->new_init_path = false;
        r = radeon_init(rdev);
        if (r) {
                return r;
        }
        if (!rdev->new_init_path) {
                /* Setup errata flags */
                radeon_errata(rdev);
                /* Initialize scratch registers */
                radeon_scratch_init(rdev);
                /* Initialize surface registers */
                radeon_surface_init(rdev);

                /* TODO: disable VGA, need to use VGA request */
                /* BIOS */
                if (!radeon_get_bios(rdev)) {
                        if (ASIC_IS_AVIVO(rdev))
                                return -EINVAL;
                }
                if (rdev->is_atom_bios) {
                        r = radeon_atombios_init(rdev);
                        if (r) {
                                return r;
                        }
                } else {
                        r = radeon_combios_init(rdev);
                        if (r) {
                                return r;
                        }
                }
                /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
                if (radeon_gpu_reset(rdev)) {
                        /* FIXME: what do we want to do here ? */
                }
                /* check if the card is posted or not */
                if (!radeon_card_posted(rdev) && rdev->bios) {
                        DRM_INFO("GPU not posted. posting now...\n");
                        if (rdev->is_atom_bios) {
                                atom_asic_init(rdev->mode_info.atom_context);
                        } else {
                                radeon_combios_asic_init(rdev->ddev);
                        }
                }
                /* Initialize clocks */
                r = radeon_clocks_init(rdev);
                if (r) {
                        return r;
                }
                /* Get vram information */
                radeon_vram_info(rdev);

                /* Add an MTRR for the VRAM */
                rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
                                MTRR_TYPE_WRCOMB, 1);
                DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
                                (unsigned)(rdev->mc.mc_vram_size >> 20),
                                (unsigned)(rdev->mc.aper_size >> 20));
                DRM_INFO("RAM width %dbits %cDR\n",
                                rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
                /* Initialize memory controller (also test AGP) */
                r = radeon_mc_init(rdev);
                if (r) {
                        return r;
                }
                /* Fence driver */
                r = radeon_fence_driver_init(rdev);
                if (r) {
                        return r;
                }
                r = radeon_irq_kms_init(rdev);
                if (r) {
                        return r;
                }
                /* Memory manager */
                r = radeon_object_init(rdev);
                if (r) {
                        return r;
                }
                /* Initialize GART (initialize after TTM so we can allocate
                 * memory through TTM but finalize after TTM) */
                r = radeon_gart_enable(rdev);
                if (!r) {
                        r = radeon_gem_init(rdev);
                }

                /* 1M ring buffer */
                if (!r) {
                        r = radeon_cp_init(rdev, 1024 * 1024);
                }
                if (!r) {
                        r = radeon_wb_init(rdev);
                        if (r) {
                                DRM_ERROR("radeon: failed initializing WB (%d).\n", r);
                                return r;
                        }
                }
                if (!r) {
                        r = radeon_ib_pool_init(rdev);
                        if (r) {
                                DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
                                return r;
                        }
                }
                if (!r) {
                        r = radeon_ib_test(rdev);
                        if (r) {
                                DRM_ERROR("radeon: failed testing IB (%d).\n", r);
                                return r;
                        }
                }
                ret = r;
        }
        r = radeon_modeset_init(rdev);
        if (r) {
                return r;
        }
        if (!ret) {
                DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
        }
        if (radeon_testing) {
                radeon_test_moves(rdev);
        }
        if (radeon_benchmarking) {
                radeon_benchmark(rdev);
        }
        return ret;
}

void radeon_device_fini(struct radeon_device *rdev)
{
        if (rdev == NULL || rdev->rmmio == NULL) {
                return;
        }
        DRM_INFO("radeon: finishing device.\n");
        rdev->shutdown = true;
        /* Order matters, so be careful if you rearrange anything. */
        radeon_modeset_fini(rdev);
        if (!rdev->new_init_path) {
                radeon_ib_pool_fini(rdev);
                radeon_cp_fini(rdev);
                radeon_wb_fini(rdev);
                radeon_gem_fini(rdev);
                radeon_mc_fini(rdev);
#if __OS_HAS_AGP
                radeon_agp_fini(rdev);
#endif
                radeon_irq_kms_fini(rdev);
                radeon_fence_driver_fini(rdev);
                radeon_clocks_fini(rdev);
                radeon_object_fini(rdev);
                if (rdev->is_atom_bios) {
                        radeon_atombios_fini(rdev);
                } else {
                        radeon_combios_fini(rdev);
                }
                kfree(rdev->bios);
                rdev->bios = NULL;
        } else {
                radeon_fini(rdev);
        }
        iounmap(rdev->rmmio);
        rdev->rmmio = NULL;
}


/*
 * Suspend & resume.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_crtc *crtc;

        if (dev == NULL || rdev == NULL) {
                return -ENODEV;
        }
        if (state.event == PM_EVENT_PRETHAW) {
                return 0;
        }
        /* unpin the front buffers */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
                struct radeon_object *robj;

                if (rfb == NULL || rfb->obj == NULL) {
                        continue;
                }
                robj = rfb->obj->driver_private;
                if (robj != rdev->fbdev_robj) {
                        radeon_object_unpin(robj);
                }
        }
        /* evict vram memory */
        radeon_object_evict_vram(rdev);
        /* wait for gpu to finish processing current batch */
        radeon_fence_wait_last(rdev);

        if (!rdev->new_init_path) {
                radeon_cp_disable(rdev);
                radeon_gart_disable(rdev);
        } else {
                radeon_suspend(rdev);
        }
        /* evict remaining vram memory */
        radeon_object_evict_vram(rdev);

        rdev->irq.sw_int = false;
        radeon_irq_set(rdev);

        pci_save_state(dev->pdev);
        if (state.event == PM_EVENT_SUSPEND) {
                /* Shut down the device */
                pci_disable_device(dev->pdev);
                pci_set_power_state(dev->pdev, PCI_D3hot);
        }
        acquire_console_sem();
        fb_set_suspend(rdev->fbdev_info, 1);
        release_console_sem();
        return 0;
}

int radeon_resume_kms(struct drm_device *dev)
{
        struct radeon_device *rdev = dev->dev_private;
        int r;

        acquire_console_sem();
        pci_set_power_state(dev->pdev, PCI_D0);
        pci_restore_state(dev->pdev);
        if (pci_enable_device(dev->pdev)) {
                release_console_sem();
                return -1;
        }
        pci_set_master(dev->pdev);
        /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
        if (radeon_gpu_reset(rdev)) {
                /* FIXME: what do we want to do here ? */
        }
        if (!rdev->new_init_path) {
                /* post card */
                if (rdev->is_atom_bios) {
                        atom_asic_init(rdev->mode_info.atom_context);
                } else {
                        radeon_combios_asic_init(rdev->ddev);
                }
                /* Initialize clocks */
                r = radeon_clocks_init(rdev);
                if (r) {
                        release_console_sem();
                        return r;
                }
                /* Enable IRQ */
                rdev->irq.sw_int = true;
                radeon_irq_set(rdev);
                /* Initialize GPU Memory Controller */
                r = radeon_mc_init(rdev);
                if (r) {
                        goto out;
                }
                r = radeon_gart_enable(rdev);
                if (r) {
                        goto out;
                }
                r = radeon_cp_init(rdev, rdev->cp.ring_size);
                if (r) {
                        goto out;
                }
        } else {
                radeon_resume(rdev);
        }
out:
        fb_set_suspend(rdev->fbdev_info, 0);
        release_console_sem();

        /* blat the mode back in */
        drm_helper_resume_force_mode(dev);
        return 0;
}


/*
 * Debugfs
 */
struct radeon_debugfs {
        struct drm_info_list    *files;
        unsigned                num_files;
};
static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
static unsigned _radeon_debugfs_count = 0;

int radeon_debugfs_add_files(struct radeon_device *rdev,
                             struct drm_info_list *files,
                             unsigned nfiles)
{
        unsigned i;

        for (i = 0; i < _radeon_debugfs_count; i++) {
                if (_radeon_debugfs[i].files == files) {
                        /* Already registered */
                        return 0;
                }
        }
        if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
                DRM_ERROR("Reached maximum number of debugfs files.\n");
                DRM_ERROR("Report this so we can increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
                return -EINVAL;
        }
        _radeon_debugfs[_radeon_debugfs_count].files = files;
        _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
        _radeon_debugfs_count++;
#if defined(CONFIG_DEBUG_FS)
        drm_debugfs_create_files(files, nfiles,
                                 rdev->ddev->control->debugfs_root,
                                 rdev->ddev->control);
        drm_debugfs_create_files(files, nfiles,
                                 rdev->ddev->primary->debugfs_root,
                                 rdev->ddev->primary);
#endif
        return 0;
}
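
/*
 * Illustrative sketch (hypothetical names, for exposition only): an engine
 * registers its debugfs entries by handing this function a static
 * drm_info_list array; the table above is kept so duplicate registrations
 * and cleanup can be handled centrally.
 *
 *      static int example_ring_info(struct seq_file *m, void *data)
 *      {
 *              // dump whatever state this entry is for
 *              return 0;
 *      }
 *
 *      static struct drm_info_list example_info_list[] = {
 *              { "example_ring_info", example_ring_info, 0, NULL },
 *      };
 *
 *      radeon_debugfs_add_files(rdev, example_info_list,
 *                               ARRAY_SIZE(example_info_list));
 */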

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
        return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
        unsigned i;

        for (i = 0; i < _radeon_debugfs_count; i++) {
                drm_debugfs_remove_files(_radeon_debugfs[i].files,
                                         _radeon_debugfs[i].num_files, minor);
        }
}
#endif