drm/radeon/kms/pm: rework power management
[safe/jmp/linux-2.6] / drivers / gpu / drm / radeon / radeon_pm.c
1 /*
2  * Permission is hereby granted, free of charge, to any person obtaining a
3  * copy of this software and associated documentation files (the "Software"),
4  * to deal in the Software without restriction, including without limitation
5  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
6  * and/or sell copies of the Software, and to permit persons to whom the
7  * Software is furnished to do so, subject to the following conditions:
8  *
9  * The above copyright notice and this permission notice shall be included in
10  * all copies or substantial portions of the Software.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
15  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
16  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
17  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
18  * OTHER DEALINGS IN THE SOFTWARE.
19  *
20  * Authors: Rafał Miłecki <zajec5@gmail.com>
21  *          Alex Deucher <alexdeucher@gmail.com>
22  */
23 #include "drmP.h"
24 #include "radeon.h"
25 #include "avivod.h"
26
27 #define RADEON_IDLE_LOOP_MS 100
28 #define RADEON_RECLOCK_DELAY_MS 200
29 #define RADEON_WAIT_VBLANK_TIMEOUT 200
30 #define RADEON_WAIT_IDLE_TIMEOUT 200
31
32 static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
33 static void radeon_pm_set_clocks(struct radeon_device *rdev);
34 static void radeon_pm_idle_work_handler(struct work_struct *work);
35 static int radeon_debugfs_pm_init(struct radeon_device *rdev);
36
static void radeon_pm_set_power_mode_static_locked(struct radeon_device *rdev)
{
	/* Switch to the power state previously stored in
	 * rdev->pm.requested_power_state_index / requested_clock_mode_index.
	 * Caller must hold rdev->pm.mutex; the CP mutex is taken here to
	 * keep command submission quiet across the reclock.
	 */
	mutex_lock(&rdev->cp.mutex);

	/* wait for GPU idle: arm the gui_idle interrupt and sleep until the
	 * IRQ handler sets pm.gui_idle, bounded by RADEON_WAIT_IDLE_TIMEOUT */
	rdev->pm.gui_idle = false;
	rdev->irq.gui_idle = true;
	radeon_irq_set(rdev);
	wait_event_interruptible_timeout(
		rdev->irq.idle_queue, rdev->pm.gui_idle,
		msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
	rdev->irq.gui_idle = false;
	radeon_irq_set(rdev);

	/* NOTE(review): second arg is true here but false in the dynpm path
	 * (radeon_pm_set_clocks_locked) -- presumably a "static/forced"
	 * flag; confirm against radeon_set_power_state() */
	radeon_set_power_state(rdev, true);

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	mutex_unlock(&rdev->cp.mutex);
}
60
61 static ssize_t radeon_get_power_state_static(struct device *dev,
62                                              struct device_attribute *attr,
63                                              char *buf)
64 {
65         struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
66         struct radeon_device *rdev = ddev->dev_private;
67
68         return snprintf(buf, PAGE_SIZE, "%d.%d\n", rdev->pm.current_power_state_index,
69                         rdev->pm.current_clock_mode_index);
70 }
71
72 static ssize_t radeon_set_power_state_static(struct device *dev,
73                                              struct device_attribute *attr,
74                                              const char *buf,
75                                              size_t count)
76 {
77         struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
78         struct radeon_device *rdev = ddev->dev_private;
79         int ps, cm;
80
81         if (sscanf(buf, "%u.%u", &ps, &cm) != 2) {
82                 DRM_ERROR("Invalid power state!\n");
83                 return count;
84         }
85
86         mutex_lock(&rdev->pm.mutex);
87         if ((ps >= 0) && (ps < rdev->pm.num_power_states) &&
88             (cm >= 0) && (cm < rdev->pm.power_state[ps].num_clock_modes)) {
89                 if ((rdev->pm.active_crtc_count > 1) &&
90                     (rdev->pm.power_state[ps].flags & RADEON_PM_SINGLE_DISPLAY_ONLY)) {
91                         DRM_ERROR("Invalid power state for multi-head: %d.%d\n", ps, cm);
92                 } else {
93                         /* disable dynpm */
94                         rdev->pm.state = PM_STATE_DISABLED;
95                         rdev->pm.planned_action = PM_ACTION_NONE;
96                         rdev->pm.requested_power_state_index = ps;
97                         rdev->pm.requested_clock_mode_index = cm;
98                         radeon_pm_set_power_mode_static_locked(rdev);
99                 }
100         } else
101                 DRM_ERROR("Invalid power state: %d.%d\n\n", ps, cm);
102         mutex_unlock(&rdev->pm.mutex);
103
104         return count;
105 }
106
107 static ssize_t radeon_get_dynpm(struct device *dev,
108                                 struct device_attribute *attr,
109                                 char *buf)
110 {
111         struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
112         struct radeon_device *rdev = ddev->dev_private;
113
114         return snprintf(buf, PAGE_SIZE, "%s\n",
115                         (rdev->pm.state == PM_STATE_DISABLED) ? "disabled" : "enabled");
116 }
117
118 static ssize_t radeon_set_dynpm(struct device *dev,
119                                 struct device_attribute *attr,
120                                 const char *buf,
121                                 size_t count)
122 {
123         struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
124         struct radeon_device *rdev = ddev->dev_private;
125         int tmp = simple_strtoul(buf, NULL, 10);
126
127         if (tmp == 0) {
128                 /* update power mode info */
129                 radeon_pm_compute_clocks(rdev);
130                 /* disable dynpm */
131                 mutex_lock(&rdev->pm.mutex);
132                 rdev->pm.state = PM_STATE_DISABLED;
133                 rdev->pm.planned_action = PM_ACTION_NONE;
134                 mutex_unlock(&rdev->pm.mutex);
135                 DRM_INFO("radeon: dynamic power management disabled\n");
136         } else if (tmp == 1) {
137                 if (rdev->pm.num_power_states > 1) {
138                         /* enable dynpm */
139                         mutex_lock(&rdev->pm.mutex);
140                         rdev->pm.state = PM_STATE_PAUSED;
141                         rdev->pm.planned_action = PM_ACTION_DEFAULT;
142                         radeon_get_power_state(rdev, rdev->pm.planned_action);
143                         mutex_unlock(&rdev->pm.mutex);
144                         /* update power mode info */
145                         radeon_pm_compute_clocks(rdev);
146                         DRM_INFO("radeon: dynamic power management enabled\n");
147                 } else
148                         DRM_ERROR("dynpm not valid on this system\n");
149         } else
150                 DRM_ERROR("Invalid setting: %d\n", tmp);
151
152         return count;
153 }
154
/* sysfs attributes: <device>/power_state ("<state>.<mode>", writable) and
 * <device>/dynpm ("0"/"1", writable); registered in radeon_pm_init() */
static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR, radeon_get_power_state_static, radeon_set_power_state_static);
static DEVICE_ATTR(dynpm, S_IRUGO | S_IWUSR, radeon_get_dynpm, radeon_set_dynpm);
157
158
/* human-readable names, indexed by rdev->pm.state (PM_STATE_*) */
static const char *pm_state_names[4] = {
	"PM_STATE_DISABLED",
	"PM_STATE_MINIMUM",
	"PM_STATE_PAUSED",
	"PM_STATE_ACTIVE"
};

/* human-readable names, indexed by power_state[].type; slot 0 is
 * intentionally empty (untyped/default state) */
static const char *pm_state_types[5] = {
	"",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};
173
174 static void radeon_print_power_mode_info(struct radeon_device *rdev)
175 {
176         int i, j;
177         bool is_default;
178
179         DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
180         for (i = 0; i < rdev->pm.num_power_states; i++) {
181                 if (rdev->pm.default_power_state_index == i)
182                         is_default = true;
183                 else
184                         is_default = false;
185                 DRM_INFO("State %d %s %s\n", i,
186                          pm_state_types[rdev->pm.power_state[i].type],
187                          is_default ? "(default)" : "");
188                 if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
189                         DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].pcie_lanes);
190                 if (rdev->pm.power_state[i].flags & RADEON_PM_SINGLE_DISPLAY_ONLY)
191                         DRM_INFO("\tSingle display only\n");
192                 DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
193                 for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
194                         if (rdev->flags & RADEON_IS_IGP)
195                                 DRM_INFO("\t\t%d engine: %d\n",
196                                          j,
197                                          rdev->pm.power_state[i].clock_info[j].sclk * 10);
198                         else
199                                 DRM_INFO("\t\t%d engine/memory: %d/%d\n",
200                                          j,
201                                          rdev->pm.power_state[i].clock_info[j].sclk * 10,
202                                          rdev->pm.power_state[i].clock_info[j].mclk * 10);
203                 }
204         }
205 }
206
207 void radeon_sync_with_vblank(struct radeon_device *rdev)
208 {
209         if (rdev->pm.active_crtcs) {
210                 rdev->pm.vblank_sync = false;
211                 wait_event_timeout(
212                         rdev->irq.vblank_queue, rdev->pm.vblank_sync,
213                         msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
214         }
215 }
216
217 int radeon_pm_init(struct radeon_device *rdev)
218 {
219         rdev->pm.state = PM_STATE_DISABLED;
220         rdev->pm.planned_action = PM_ACTION_NONE;
221         rdev->pm.can_upclock = true;
222         rdev->pm.can_downclock = true;
223
224         if (rdev->bios) {
225                 if (rdev->is_atom_bios)
226                         radeon_atombios_get_power_modes(rdev);
227                 else
228                         radeon_combios_get_power_modes(rdev);
229                 radeon_print_power_mode_info(rdev);
230         }
231
232         if (radeon_debugfs_pm_init(rdev)) {
233                 DRM_ERROR("Failed to register debugfs file for PM!\n");
234         }
235
236         /* where's the best place to put this? */
237         device_create_file(rdev->dev, &dev_attr_power_state);
238         device_create_file(rdev->dev, &dev_attr_dynpm);
239
240         INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);
241
242         if ((radeon_dynpm != -1 && radeon_dynpm) && (rdev->pm.num_power_states > 1)) {
243                 rdev->pm.state = PM_STATE_PAUSED;
244                 DRM_INFO("radeon: dynamic power management enabled\n");
245         }
246
247         DRM_INFO("radeon: power management initialized\n");
248
249         return 0;
250 }
251
void radeon_pm_fini(struct radeon_device *rdev)
{
	/* Tear down power management: stop dynpm if it is running, or undo a
	 * forced static state, then remove the sysfs files and i2c bus. */
	if (rdev->pm.state != PM_STATE_DISABLED) {
		/* dynpm active: cancel the idle worker, then drop back to the
		 * default clocks before disabling */
		cancel_delayed_work_sync(&rdev->pm.idle_work);
		rdev->pm.state = PM_STATE_DISABLED;
		rdev->pm.planned_action = PM_ACTION_DEFAULT;
		radeon_pm_set_clocks(rdev);
	} else if ((rdev->pm.current_power_state_index !=
		    rdev->pm.default_power_state_index) ||
		   (rdev->pm.current_clock_mode_index != 0)) {
		/* dynpm disabled but a non-default static state was forced
		 * via sysfs: restore the default state / clock mode 0 */
		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
		rdev->pm.requested_clock_mode_index = 0;
		mutex_lock(&rdev->pm.mutex);
		radeon_pm_set_power_mode_static_locked(rdev);
		mutex_unlock(&rdev->pm.mutex);
	}

	device_remove_file(rdev->dev, &dev_attr_power_state);
	device_remove_file(rdev->dev, &dev_attr_dynpm);

	if (rdev->pm.i2c_bus)
		radeon_i2c_destroy(rdev->pm.i2c_bus);
}
277
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	/* Re-evaluate the dynpm state machine after a display configuration
	 * change: recount active crtcs and transition between ACTIVE,
	 * PAUSED and MINIMUM accordingly.  No-op while dynpm is disabled. */
	if (rdev->pm.state == PM_STATE_DISABLED)
		return;

	mutex_lock(&rdev->pm.mutex);

	/* rebuild the active crtc mask and count from the mode config */
	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	list_for_each_entry(crtc,
		&ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
			rdev->pm.active_crtc_count++;
		}
	}

	if (rdev->pm.active_crtc_count > 1) {
		/* multi-head: pause dynamic reclocking and force the
		 * upclocked state */
		if (rdev->pm.state == PM_STATE_ACTIVE) {
			cancel_delayed_work(&rdev->pm.idle_work);

			rdev->pm.state = PM_STATE_PAUSED;
			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
			radeon_pm_set_clocks(rdev);

			DRM_DEBUG("radeon: dynamic power management deactivated\n");
		}
	} else if (rdev->pm.active_crtc_count == 1) {
		/* TODO: Increase clocks if needed for current mode */

		if (rdev->pm.state == PM_STATE_MINIMUM) {
			/* leaving the headless minimum state: upclock and
			 * restart the idle worker */
			rdev->pm.state = PM_STATE_ACTIVE;
			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
			radeon_pm_set_clocks(rdev);

			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
				msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
		} else if (rdev->pm.state == PM_STATE_PAUSED) {
			/* back to a single head: just resume the worker */
			rdev->pm.state = PM_STATE_ACTIVE;
			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
				msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
			DRM_DEBUG("radeon: dynamic power management activated\n");
		}
	} else { /* count == 0 */
		/* no active heads: stop the worker and drop to minimum clocks */
		if (rdev->pm.state != PM_STATE_MINIMUM) {
			cancel_delayed_work(&rdev->pm.idle_work);

			rdev->pm.state = PM_STATE_MINIMUM;
			rdev->pm.planned_action = PM_ACTION_MINIMUM;
			radeon_pm_set_clocks(rdev);
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}
338
339 bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
340 {
341         u32 stat_crtc = 0;
342         bool in_vbl = true;
343
344         if (ASIC_IS_DCE4(rdev)) {
345                 if (rdev->pm.active_crtcs & (1 << 0)) {
346                         stat_crtc = RREG32(EVERGREEN_CRTC_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
347                         if (!(stat_crtc & 1))
348                                 in_vbl = false;
349                 }
350                 if (rdev->pm.active_crtcs & (1 << 1)) {
351                         stat_crtc = RREG32(EVERGREEN_CRTC_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
352                         if (!(stat_crtc & 1))
353                                 in_vbl = false;
354                 }
355                 if (rdev->pm.active_crtcs & (1 << 2)) {
356                         stat_crtc = RREG32(EVERGREEN_CRTC_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
357                         if (!(stat_crtc & 1))
358                                 in_vbl = false;
359                 }
360                 if (rdev->pm.active_crtcs & (1 << 3)) {
361                         stat_crtc = RREG32(EVERGREEN_CRTC_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
362                         if (!(stat_crtc & 1))
363                                 in_vbl = false;
364                 }
365                 if (rdev->pm.active_crtcs & (1 << 4)) {
366                         stat_crtc = RREG32(EVERGREEN_CRTC_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
367                         if (!(stat_crtc & 1))
368                                 in_vbl = false;
369                 }
370                 if (rdev->pm.active_crtcs & (1 << 5)) {
371                         stat_crtc = RREG32(EVERGREEN_CRTC_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
372                         if (!(stat_crtc & 1))
373                                 in_vbl = false;
374                 }
375         } else if (ASIC_IS_AVIVO(rdev)) {
376                 if (rdev->pm.active_crtcs & (1 << 0)) {
377                         stat_crtc = RREG32(D1CRTC_STATUS);
378                         if (!(stat_crtc & 1))
379                                 in_vbl = false;
380                 }
381                 if (rdev->pm.active_crtcs & (1 << 1)) {
382                         stat_crtc = RREG32(D2CRTC_STATUS);
383                         if (!(stat_crtc & 1))
384                                 in_vbl = false;
385                 }
386         } else {
387                 if (rdev->pm.active_crtcs & (1 << 0)) {
388                         stat_crtc = RREG32(RADEON_CRTC_STATUS);
389                         if (!(stat_crtc & 1))
390                                 in_vbl = false;
391                 }
392                 if (rdev->pm.active_crtcs & (1 << 1)) {
393                         stat_crtc = RREG32(RADEON_CRTC2_STATUS);
394                         if (!(stat_crtc & 1))
395                                 in_vbl = false;
396                 }
397         }
398         if (in_vbl == false)
399                 DRM_INFO("not in vbl for pm change %08x at %s\n", stat_crtc,
400                          finish ? "exit" : "entry");
401         return in_vbl;
402 }
static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
{
	/* Program the pending reclock and clear the planned action.
	 * Caller holds pm.mutex and cp.mutex and has already taken vblank
	 * references for the active crtcs (see radeon_pm_set_clocks()). */
	/*radeon_fence_wait_last(rdev);*/

	radeon_set_power_state(rdev, false);
	rdev->pm.planned_action = PM_ACTION_NONE;
}
410
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	int i;

	/* Apply rdev->pm.planned_action: select the matching power state,
	 * wait for the GPU to go idle, then reclock while holding vblank
	 * references on all active crtcs.  Caller holds pm.mutex. */
	radeon_get_power_state(rdev, rdev->pm.planned_action);
	mutex_lock(&rdev->cp.mutex);

	/* wait for GPU idle: arm the gui_idle interrupt and sleep until the
	 * IRQ handler sets pm.gui_idle, bounded by RADEON_WAIT_IDLE_TIMEOUT */
	rdev->pm.gui_idle = false;
	rdev->irq.gui_idle = true;
	radeon_irq_set(rdev);
	wait_event_interruptible_timeout(
		rdev->irq.idle_queue, rdev->pm.gui_idle,
		msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
	rdev->irq.gui_idle = false;
	radeon_irq_set(rdev);

	/* hold a vblank reference on every active crtc so vblank interrupts
	 * keep firing across the reclock; req_vblank records what we took */
	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->pm.active_crtcs & (1 << i)) {
			rdev->pm.req_vblank |= (1 << i);
			drm_vblank_get(rdev->ddev, i);
		}
	}
	radeon_pm_set_clocks_locked(rdev);
	/* release exactly the references recorded above */
	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->pm.req_vblank & (1 << i)) {
			rdev->pm.req_vblank &= ~(1 << i);
			drm_vblank_put(rdev->ddev, i);
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	mutex_unlock(&rdev->cp.mutex);
}
449
450 static void radeon_pm_idle_work_handler(struct work_struct *work)
451 {
452         struct radeon_device *rdev;
453         rdev = container_of(work, struct radeon_device,
454                                 pm.idle_work.work);
455
456         mutex_lock(&rdev->pm.mutex);
457         if (rdev->pm.state == PM_STATE_ACTIVE) {
458                 unsigned long irq_flags;
459                 int not_processed = 0;
460
461                 read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
462                 if (!list_empty(&rdev->fence_drv.emited)) {
463                         struct list_head *ptr;
464                         list_for_each(ptr, &rdev->fence_drv.emited) {
465                                 /* count up to 3, that's enought info */
466                                 if (++not_processed >= 3)
467                                         break;
468                         }
469                 }
470                 read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
471
472                 if (not_processed >= 3) { /* should upclock */
473                         if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
474                                 rdev->pm.planned_action = PM_ACTION_NONE;
475                         } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
476                                    rdev->pm.can_upclock) {
477                                 rdev->pm.planned_action =
478                                         PM_ACTION_UPCLOCK;
479                                 rdev->pm.action_timeout = jiffies +
480                                 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
481                         }
482                 } else if (not_processed == 0) { /* should downclock */
483                         if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
484                                 rdev->pm.planned_action = PM_ACTION_NONE;
485                         } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
486                                    rdev->pm.can_downclock) {
487                                 rdev->pm.planned_action =
488                                         PM_ACTION_DOWNCLOCK;
489                                 rdev->pm.action_timeout = jiffies +
490                                 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
491                         }
492                 }
493
494                 if (rdev->pm.planned_action != PM_ACTION_NONE &&
495                     jiffies > rdev->pm.action_timeout) {
496                         radeon_pm_set_clocks(rdev);
497                 }
498         }
499         mutex_unlock(&rdev->pm.mutex);
500
501         queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
502                                         msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
503 }
504
505 /*
506  * Debugfs info
507  */
508 #if defined(CONFIG_DEBUG_FS)
509
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	/* debugfs dump of the current pm state and clocks */
	seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
	/* the literal '0' after %u suggests clocks are stored in units of
	 * 10 kHz -- NOTE(review): confirm against the clock struct */
	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
	/* optional asic hooks: only present on some chips */
	if (rdev->asic->get_memory_clock)
		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
	if (rdev->asic->get_pcie_lanes)
		seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));

	return 0;
}
527
/* debugfs entries registered by radeon_debugfs_pm_init() */
static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
531 #endif
532
/* Register the pm debugfs files; returns 0 on success or when debugfs is
 * compiled out, otherwise the error from radeon_debugfs_add_files(). */
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}