/* drivers/gpu/drm/radeon/radeon_pm.c */
/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"

#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
#define RADEON_WAIT_IDLE_TIMEOUT 200

static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
static void radeon_pm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);

static const char *pm_state_names[4] = {
        "PM_STATE_DISABLED",
        "PM_STATE_MINIMUM",
        "PM_STATE_PAUSED",
        "PM_STATE_ACTIVE"
};

static const char *pm_state_types[5] = {
        "Default",
        "Powersave",
        "Battery",
        "Balanced",
        "Performance",
};

static void radeon_print_power_mode_info(struct radeon_device *rdev)
{
        int i, j;
        bool is_default;

        DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
        for (i = 0; i < rdev->pm.num_power_states; i++) {
                if (rdev->pm.default_power_state_index == i)
                        is_default = true;
                else
                        is_default = false;
                DRM_INFO("State %d %s %s\n", i,
                         pm_state_types[rdev->pm.power_state[i].type],
                         is_default ? "(default)" : "");
                if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
                        DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes);
                if (rdev->pm.power_state[i].flags & RADEON_PM_SINGLE_DISPLAY_ONLY)
                        DRM_INFO("\tSingle display only\n");
                DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
                for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
                        if (rdev->flags & RADEON_IS_IGP)
                                DRM_INFO("\t\t%d engine: %d\n",
                                         j,
                                         rdev->pm.power_state[i].clock_info[j].sclk * 10);
                        else
                                DRM_INFO("\t\t%d engine/memory: %d/%d\n",
                                         j,
                                         rdev->pm.power_state[i].clock_info[j].sclk * 10,
                                         rdev->pm.power_state[i].clock_info[j].mclk * 10);
                }
        }
}

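/* Wait (with a timeout) for the next vblank on any active CRTC. */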
void radeon_sync_with_vblank(struct radeon_device *rdev)
{
        if (rdev->pm.active_crtcs) {
                rdev->pm.vblank_sync = false;
                wait_event_timeout(
                        rdev->irq.vblank_queue, rdev->pm.vblank_sync,
                        msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
        }
}

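/*
 * Set up dynamic power management: parse the power states from the BIOS,
 * register the debugfs file and the idle work handler, and start in the
 * paused state if dynpm was requested via the radeon_dynpm module parameter.
 */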
int radeon_pm_init(struct radeon_device *rdev)
{
        rdev->pm.state = PM_STATE_DISABLED;
        rdev->pm.planned_action = PM_ACTION_NONE;
        rdev->pm.can_upclock = true;
        rdev->pm.can_downclock = true;

        if (rdev->bios) {
                if (rdev->is_atom_bios)
                        radeon_atombios_get_power_modes(rdev);
                else
                        radeon_combios_get_power_modes(rdev);
                radeon_print_power_mode_info(rdev);
        }

        if (radeon_debugfs_pm_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for PM!\n");
        }

        INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);

        if (radeon_dynpm != -1 && radeon_dynpm) {
                rdev->pm.state = PM_STATE_PAUSED;
                DRM_INFO("radeon: dynamic power management enabled\n");
        }

        DRM_INFO("radeon: power management initialized\n");

        return 0;
}

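/* Tear down power management; currently only the PM i2c bus needs cleanup. */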
void radeon_pm_fini(struct radeon_device *rdev)
{
        if (rdev->pm.i2c_bus)
                radeon_i2c_destroy(rdev->pm.i2c_bus);
}

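/*
 * Re-evaluate the PM state machine whenever the set of active CRTCs changes:
 * pause dynamic reclocking with more than one active display, run it with
 * exactly one, and drop to minimum clocks with none.
 */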
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
        struct drm_device *ddev = rdev->ddev;
        struct drm_crtc *crtc;
        struct radeon_crtc *radeon_crtc;

        if (rdev->pm.state == PM_STATE_DISABLED)
                return;

        mutex_lock(&rdev->pm.mutex);

        rdev->pm.active_crtcs = 0;
        rdev->pm.active_crtc_count = 0;
        list_for_each_entry(crtc,
                &ddev->mode_config.crtc_list, head) {
                radeon_crtc = to_radeon_crtc(crtc);
                if (radeon_crtc->enabled) {
                        rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
                        rdev->pm.active_crtc_count++;
                }
        }

        if (rdev->pm.active_crtc_count > 1) {
                if (rdev->pm.state == PM_STATE_ACTIVE) {
                        cancel_delayed_work(&rdev->pm.idle_work);

                        rdev->pm.state = PM_STATE_PAUSED;
                        rdev->pm.planned_action = PM_ACTION_UPCLOCK;
                        radeon_pm_set_clocks(rdev);

                        DRM_DEBUG("radeon: dynamic power management deactivated\n");
                }
        } else if (rdev->pm.active_crtc_count == 1) {
                /* TODO: Increase clocks if needed for current mode */

                if (rdev->pm.state == PM_STATE_MINIMUM) {
                        rdev->pm.state = PM_STATE_ACTIVE;
                        rdev->pm.planned_action = PM_ACTION_UPCLOCK;
                        radeon_pm_set_clocks(rdev);

                        queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
                                msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
                } else if (rdev->pm.state == PM_STATE_PAUSED) {
                        rdev->pm.state = PM_STATE_ACTIVE;
                        queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
                                msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
                        DRM_DEBUG("radeon: dynamic power management activated\n");
                }
        } else { /* count == 0 */
                if (rdev->pm.state != PM_STATE_MINIMUM) {
                        cancel_delayed_work(&rdev->pm.idle_work);

                        rdev->pm.state = PM_STATE_MINIMUM;
                        rdev->pm.planned_action = PM_ACTION_MINIMUM;
                        radeon_pm_set_clocks(rdev);
                }
        }

        mutex_unlock(&rdev->pm.mutex);
}

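/*
 * Debug helper: report whether every active CRTC is currently inside its
 * vertical blanking period, logging the raw status register if not.
 */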
bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
        u32 stat_crtc = 0;
        bool in_vbl = true;

        if (ASIC_IS_DCE4(rdev)) {
                if (rdev->pm.active_crtcs & (1 << 0)) {
                        stat_crtc = RREG32(EVERGREEN_CRTC_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
                        if (!(stat_crtc & 1))
                                in_vbl = false;
                }
                if (rdev->pm.active_crtcs & (1 << 1)) {
                        stat_crtc = RREG32(EVERGREEN_CRTC_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
                        if (!(stat_crtc & 1))
                                in_vbl = false;
                }
                if (rdev->pm.active_crtcs & (1 << 2)) {
                        stat_crtc = RREG32(EVERGREEN_CRTC_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
                        if (!(stat_crtc & 1))
                                in_vbl = false;
                }
                if (rdev->pm.active_crtcs & (1 << 3)) {
                        stat_crtc = RREG32(EVERGREEN_CRTC_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
                        if (!(stat_crtc & 1))
                                in_vbl = false;
                }
                if (rdev->pm.active_crtcs & (1 << 4)) {
                        stat_crtc = RREG32(EVERGREEN_CRTC_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
                        if (!(stat_crtc & 1))
                                in_vbl = false;
                }
                if (rdev->pm.active_crtcs & (1 << 5)) {
                        stat_crtc = RREG32(EVERGREEN_CRTC_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
                        if (!(stat_crtc & 1))
                                in_vbl = false;
                }
        } else if (ASIC_IS_AVIVO(rdev)) {
                if (rdev->pm.active_crtcs & (1 << 0)) {
                        stat_crtc = RREG32(D1CRTC_STATUS);
                        if (!(stat_crtc & 1))
                                in_vbl = false;
                }
                if (rdev->pm.active_crtcs & (1 << 1)) {
                        stat_crtc = RREG32(D2CRTC_STATUS);
                        if (!(stat_crtc & 1))
                                in_vbl = false;
                }
        } else {
                if (rdev->pm.active_crtcs & (1 << 0)) {
                        stat_crtc = RREG32(RADEON_CRTC_STATUS);
                        if (!(stat_crtc & 1))
                                in_vbl = false;
                }
                if (rdev->pm.active_crtcs & (1 << 1)) {
                        stat_crtc = RREG32(RADEON_CRTC2_STATUS);
                        if (!(stat_crtc & 1))
                                in_vbl = false;
                }
        }
        if (!in_vbl)
                DRM_INFO("not in vbl for pm change %08x at %s\n", stat_crtc,
                         finish ? "exit" : "entry");
        return in_vbl;
}

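/*
 * Program the power state selected by the planned action and clear it.
 * Called from radeon_pm_set_clocks() with rdev->cp.mutex held.
 */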
static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
{
        /*radeon_fence_wait_last(rdev);*/

        radeon_set_power_state(rdev);
        rdev->pm.planned_action = PM_ACTION_NONE;
}

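/*
 * Pick the power state for the planned action, wait for the GPU to go idle,
 * hold vblank references on the active CRTCs while the clocks are switched,
 * then refresh the display watermarks for the new state.
 */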
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
        int i;

        radeon_get_power_state(rdev, rdev->pm.planned_action);
        mutex_lock(&rdev->cp.mutex);

        /* wait for GPU idle */
        rdev->pm.gui_idle = false;
        rdev->irq.gui_idle = true;
        radeon_irq_set(rdev);
        wait_event_interruptible_timeout(
                rdev->irq.idle_queue, rdev->pm.gui_idle,
                msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
        rdev->irq.gui_idle = false;
        radeon_irq_set(rdev);

        for (i = 0; i < rdev->num_crtc; i++) {
                if (rdev->pm.active_crtcs & (1 << i)) {
                        rdev->pm.req_vblank |= (1 << i);
                        drm_vblank_get(rdev->ddev, i);
                }
        }
        radeon_pm_set_clocks_locked(rdev);
        for (i = 0; i < rdev->num_crtc; i++) {
                if (rdev->pm.req_vblank & (1 << i)) {
                        rdev->pm.req_vblank &= ~(1 << i);
                        drm_vblank_put(rdev->ddev, i);
                }
        }

        /* update display watermarks based on new power state */
        radeon_update_bandwidth_info(rdev);
        if (rdev->pm.active_crtc_count)
                radeon_bandwidth_update(rdev);

        mutex_unlock(&rdev->cp.mutex);
}

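/*
 * Periodic worker: sample how many fences are still outstanding (capped at
 * three) and plan an upclock or downclock, applying it only once
 * RADEON_RECLOCK_DELAY_MS has elapsed, then re-arm itself.
 */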
static void radeon_pm_idle_work_handler(struct work_struct *work)
{
        struct radeon_device *rdev;
        rdev = container_of(work, struct radeon_device,
                                pm.idle_work.work);

        mutex_lock(&rdev->pm.mutex);
        if (rdev->pm.state == PM_STATE_ACTIVE) {
                unsigned long irq_flags;
                int not_processed = 0;

                read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
                if (!list_empty(&rdev->fence_drv.emited)) {
                        struct list_head *ptr;
                        list_for_each(ptr, &rdev->fence_drv.emited) {
                                /* count up to 3, that's enough info */
                                if (++not_processed >= 3)
                                        break;
                        }
                }
                read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);

                if (not_processed >= 3) { /* should upclock */
                        if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
                                rdev->pm.planned_action = PM_ACTION_NONE;
                        } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
                                   rdev->pm.can_upclock) {
                                rdev->pm.planned_action =
                                        PM_ACTION_UPCLOCK;
                                rdev->pm.action_timeout = jiffies +
                                        msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
                        }
                } else if (not_processed == 0) { /* should downclock */
                        if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
                                rdev->pm.planned_action = PM_ACTION_NONE;
                        } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
                                   rdev->pm.can_downclock) {
                                rdev->pm.planned_action =
                                        PM_ACTION_DOWNCLOCK;
                                rdev->pm.action_timeout = jiffies +
                                        msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
                        }
                }

                if (rdev->pm.planned_action != PM_ACTION_NONE &&
                    time_after(jiffies, rdev->pm.action_timeout)) {
                        radeon_pm_set_clocks(rdev);
                }
        }
        mutex_unlock(&rdev->pm.mutex);

        queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
                                        msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;

        seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
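        /* clocks are stored in units of 10 kHz; the literal 0 in "%u0" scales them to kHz */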
        seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
        seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
        seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
        if (rdev->asic->get_memory_clock)
                seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
        if (rdev->asic->get_pcie_lanes)
                seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));

        return 0;
}

static struct drm_info_list radeon_pm_info_list[] = {
        {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif

static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
        return 0;
#endif
}