kernel/trace/blktrace.c
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/smp_lock.h>
#include <linux/time.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_BLK_OPT_CLASSIC   0x1

static struct tracer_opt blk_tracer_opts[] = {
        /* Disable the minimalistic output by default */
        { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
        { }
};

static struct tracer_flags blk_tracer_flags = {
        .val  = 0,
        .opts = blk_tracer_opts,
};

/* Global reference count of probes */
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
                       const void *data, size_t len)
{
        struct blk_io_trace *t;
        struct ring_buffer_event *event = NULL;
        struct ring_buffer *buffer = NULL;
        int pc = 0;
        int cpu = smp_processor_id();
        bool blk_tracer = blk_tracer_enabled;

        if (blk_tracer) {
                buffer = blk_tr->buffer;
                pc = preempt_count();
                event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
                                                  sizeof(*t) + len,
                                                  0, pc);
                if (!event)
                        return;
                t = ring_buffer_event_data(event);
                goto record_it;
        }

        if (!bt->rchan)
                return;

        t = relay_reserve(bt->rchan, sizeof(*t) + len);
        if (t) {
                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->time = ktime_to_ns(ktime_get());
record_it:
                t->device = bt->dev;
                t->action = action;
                t->pid = pid;
                t->cpu = cpu;
                t->pdu_len = len;
                memcpy((void *) t + sizeof(*t), data, len);

                if (blk_tracer)
                        trace_buffer_unlock_commit(buffer, event, 0, pc);
        }
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
        tsk->btrace_seq = blktrace_seq;
        trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}

static void trace_note_time(struct blk_trace *bt)
{
        struct timespec now;
        unsigned long flags;
        u32 words[2];

        getnstimeofday(&now);
        words[0] = now.tv_sec;
        words[1] = now.tv_nsec;

        local_irq_save(flags);
        trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
        local_irq_restore(flags);
}

void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
        int n;
        va_list args;
        unsigned long flags;
        char *buf;

        if (unlikely(bt->trace_state != Blktrace_running &&
                     !blk_tracer_enabled))
                return;

        local_irq_save(flags);
        buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
        va_start(args, fmt);
        n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
        va_end(args);

        trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);
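/*
 * Example (illustrative sketch only): a caller that already holds a valid
 * struct blk_trace pointer, e.g. q->blk_trace, can emit a free-form
 * message into the trace; "depth" here is a hypothetical local variable:
 *
 *        struct blk_trace *bt = q->blk_trace;
 *
 *        if (bt)
 *                __trace_note_message(bt, "queue depth now %u", depth);
 *
 * The text lands in the stream as a BLK_TN_MESSAGE note, truncated to
 * BLK_TN_MAX_MSG bytes by the vscnprintf() above.
 */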

static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
                         pid_t pid)
{
        if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
                return 1;
        if (sector && (sector < bt->start_lba || sector > bt->end_lba))
                return 1;
        if (bt->pid && pid != bt->pid)
                return 1;

        return 0;
}

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
                                 BLK_TC_ACT(BLK_TC_WRITE) };

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
          (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
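/*
 * Worked example of the bit math (illustrative): with BIO_RW_BARRIER set
 * in rw, MASK_TC_BIT(rw, BARRIER) isolates bit BIO_RW_BARRIER and shifts
 * it left by (ilog2(BLK_TC_BARRIER) + BLK_TC_SHIFT - BIO_RW_BARRIER),
 * which lands it exactly on the BLK_TC_ACT(BLK_TC_BARRIER) bit of the
 * action word - the same bit fill_rwbs() later decodes as 'B'. Since all
 * three terms are compile-time constants, each MASK_TC_BIT() use below
 * compiles down to a single and-plus-shift.
 */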

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
                     int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
        struct task_struct *tsk = current;
        struct ring_buffer_event *event = NULL;
        struct ring_buffer *buffer = NULL;
        struct blk_io_trace *t;
        unsigned long flags = 0;
        unsigned long *sequence;
        pid_t pid;
        int cpu, pc = 0;
        bool blk_tracer = blk_tracer_enabled;

        if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
                return;

        what |= ddir_act[rw & WRITE];
        what |= MASK_TC_BIT(rw, BARRIER);
        what |= MASK_TC_BIT(rw, SYNCIO);
        what |= MASK_TC_BIT(rw, AHEAD);
        what |= MASK_TC_BIT(rw, META);
        what |= MASK_TC_BIT(rw, DISCARD);

        pid = tsk->pid;
        if (act_log_check(bt, what, sector, pid))
                return;
        cpu = raw_smp_processor_id();

        if (blk_tracer) {
                tracing_record_cmdline(current);

                buffer = blk_tr->buffer;
                pc = preempt_count();
                event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
                                                  sizeof(*t) + pdu_len,
                                                  0, pc);
                if (!event)
                        return;
                t = ring_buffer_event_data(event);
                goto record_it;
        }

        /*
         * A word about the locking here - we disable interrupts to reserve
         * some space in the relay per-cpu buffer, to prevent an irq
         * from coming in and stepping on our toes.
         */
        local_irq_save(flags);

        if (unlikely(tsk->btrace_seq != blktrace_seq))
                trace_note_tsk(bt, tsk);

        t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
        if (t) {
                sequence = per_cpu_ptr(bt->sequence, cpu);

                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->sequence = ++(*sequence);
                t->time = ktime_to_ns(ktime_get());
record_it:
                /*
                 * These two are not needed in ftrace, since they are
                 * already part of the generic trace_entry and filled by
                 * tracing_generic_entry_update, but we set them here too
                 * for the benefit of the trace_event->bin() synthesizer.
                 */
                t->cpu = cpu;
                t->pid = pid;

                t->sector = sector;
                t->bytes = bytes;
                t->action = what;
                t->device = bt->dev;
                t->error = error;
                t->pdu_len = pdu_len;

                if (pdu_len)
                        memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

                if (blk_tracer) {
                        trace_buffer_unlock_commit(buffer, event, 0, pc);
                        return;
                }
        }

        local_irq_restore(flags);
}

static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);

static void blk_trace_free(struct blk_trace *bt)
{
        debugfs_remove(bt->msg_file);
        debugfs_remove(bt->dropped_file);
        relay_close(bt->rchan);
        debugfs_remove(bt->dir);
        free_percpu(bt->sequence);
        free_percpu(bt->msg_data);
        kfree(bt);
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
        blk_trace_free(bt);
        if (atomic_dec_and_test(&blk_probes_ref))
                blk_unregister_tracepoints();
}

int blk_trace_remove(struct request_queue *q)
{
        struct blk_trace *bt;

        bt = xchg(&q->blk_trace, NULL);
        if (!bt)
                return -EINVAL;

        if (bt->trace_state != Blktrace_running)
                blk_trace_cleanup(bt);

        return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static int blk_dropped_open(struct inode *inode, struct file *filp)
{
        filp->private_data = inode->i_private;

        return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
                                size_t count, loff_t *ppos)
{
        struct blk_trace *bt = filp->private_data;
        char buf[16];

        snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

        return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
        .owner =        THIS_MODULE,
        .open =         blk_dropped_open,
        .read =         blk_dropped_read,
};

static int blk_msg_open(struct inode *inode, struct file *filp)
{
        filp->private_data = inode->i_private;

        return 0;
}

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
                                size_t count, loff_t *ppos)
{
        char *msg;
        struct blk_trace *bt;

        if (count >= BLK_TN_MAX_MSG)
                return -EINVAL;

        msg = kmalloc(count + 1, GFP_KERNEL);
        if (msg == NULL)
                return -ENOMEM;

        if (copy_from_user(msg, buffer, count)) {
                kfree(msg);
                return -EFAULT;
        }

        msg[count] = '\0';
        bt = filp->private_data;
        __trace_note_message(bt, "%s", msg);
        kfree(msg);

        return count;
}

static const struct file_operations blk_msg_fops = {
        .owner =        THIS_MODULE,
        .open =         blk_msg_open,
        .write =        blk_msg_write,
};
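/*
 * Example (illustrative; the exact path depends on where debugfs is
 * mounted and on the sanitized device name chosen at setup time):
 * userspace can inject a marker into a running trace through this
 * write-only file:
 *
 *        int fd = open("/sys/kernel/debug/block/sda/msg", O_WRONLY);
 *
 *        if (fd >= 0) {
 *                write(fd, "fsync burst starts here", 23);
 *                close(fd);
 *        }
 *
 * The string is bounced through __trace_note_message() and shows up as
 * a BLK_TN_MESSAGE ('N') event between the ordinary I/O events.
 */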

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
                                     void *prev_subbuf, size_t prev_padding)
{
        struct blk_trace *bt;

        if (!relay_buf_full(buf))
                return 1;

        bt = buf->chan->private_data;
        atomic_inc(&bt->dropped);
        return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
        debugfs_remove(dentry);

        return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
                                                   struct dentry *parent,
                                                   int mode,
                                                   struct rchan_buf *buf,
                                                   int *is_global)
{
        return debugfs_create_file(filename, mode, parent, buf,
                                        &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
        .subbuf_start           = blk_subbuf_start_callback,
        .create_buf_file        = blk_create_buf_file_callback,
        .remove_buf_file        = blk_remove_buf_file_callback,
};

static void blk_trace_setup_lba(struct blk_trace *bt,
                                struct block_device *bdev)
{
        struct hd_struct *part = NULL;

        if (bdev)
                part = bdev->bd_part;

        if (part) {
                bt->start_lba = part->start_sect;
                bt->end_lba = part->start_sect + part->nr_sects;
        } else {
                bt->start_lba = 0;
                bt->end_lba = -1ULL;
        }
}

/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                       struct block_device *bdev,
                       struct blk_user_trace_setup *buts)
{
        struct blk_trace *old_bt, *bt = NULL;
        struct dentry *dir = NULL;
        int ret, i;

        if (!buts->buf_size || !buts->buf_nr)
                return -EINVAL;

        strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
        buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

        /*
         * some device names contain slash-separated paths - convert the
         * slashes to underscores so the debugfs directory name works as
         * expected
         */
        for (i = 0; i < strlen(buts->name); i++)
                if (buts->name[i] == '/')
                        buts->name[i] = '_';

        bt = kzalloc(sizeof(*bt), GFP_KERNEL);
        if (!bt)
                return -ENOMEM;

        ret = -ENOMEM;
        bt->sequence = alloc_percpu(unsigned long);
        if (!bt->sequence)
                goto err;

        bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
        if (!bt->msg_data)
                goto err;

        ret = -ENOENT;

        mutex_lock(&blk_tree_mutex);
        if (!blk_tree_root) {
                blk_tree_root = debugfs_create_dir("block", NULL);
                if (!blk_tree_root) {
                        mutex_unlock(&blk_tree_mutex);
                        goto err;
                }
        }
        mutex_unlock(&blk_tree_mutex);

        dir = debugfs_create_dir(buts->name, blk_tree_root);

        if (!dir)
                goto err;

        bt->dir = dir;
        bt->dev = dev;
        atomic_set(&bt->dropped, 0);

        ret = -EIO;
        bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
                                               &blk_dropped_fops);
        if (!bt->dropped_file)
                goto err;

        bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
        if (!bt->msg_file)
                goto err;

        bt->rchan = relay_open("trace", dir, buts->buf_size,
                                buts->buf_nr, &blk_relay_callbacks, bt);
        if (!bt->rchan)
                goto err;

        bt->act_mask = buts->act_mask;
        if (!bt->act_mask)
                bt->act_mask = (u16) -1;

        blk_trace_setup_lba(bt, bdev);

        /* overwrite with user settings */
        if (buts->start_lba)
                bt->start_lba = buts->start_lba;
        if (buts->end_lba)
                bt->end_lba = buts->end_lba;

        bt->pid = buts->pid;
        bt->trace_state = Blktrace_setup;

        ret = -EBUSY;
        old_bt = xchg(&q->blk_trace, bt);
        if (old_bt) {
                (void) xchg(&q->blk_trace, old_bt);
                goto err;
        }

        if (atomic_inc_return(&blk_probes_ref) == 1)
                blk_register_tracepoints();

        return 0;
err:
        blk_trace_free(bt);
        return ret;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                    struct block_device *bdev,
                    char __user *arg)
{
        struct blk_user_trace_setup buts;
        int ret;

        ret = copy_from_user(&buts, arg, sizeof(buts));
        if (ret)
                return -EFAULT;

        ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
        if (ret)
                return ret;

        if (copy_to_user(arg, &buts, sizeof(buts))) {
                blk_trace_remove(q);
                return -EFAULT;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
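/*
 * Example (illustrative sketch of the userspace side, roughly what
 * blktrace(8) does; error handling omitted): the BLKTRACESETUP, START,
 * STOP and TEARDOWN ioctls drive the lifecycle implemented above:
 *
 *        struct blk_user_trace_setup buts;
 *        int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);
 *
 *        memset(&buts, 0, sizeof(buts));
 *        buts.buf_size = 512 * 1024;   <- bytes per relay subbuffer
 *        buts.buf_nr   = 4;            <- subbuffers per cpu
 *
 *        ioctl(fd, BLKTRACESETUP, &buts);  <- kernel fills in buts.name
 *        ioctl(fd, BLKTRACESTART, 0);
 *        ioctl(fd, BLKTRACESTOP, 0);
 *        ioctl(fd, BLKTRACETEARDOWN, 0);
 *
 * An act_mask of 0 is promoted to "all actions" by do_blk_trace_setup().
 */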

int blk_trace_startstop(struct request_queue *q, int start)
{
        int ret;
        struct blk_trace *bt = q->blk_trace;

        if (bt == NULL)
                return -EINVAL;

        /*
         * For starting a trace, we can transition from a setup or stopped
         * trace. For stopping a trace, the state must be running
         */
        ret = -EINVAL;
        if (start) {
                if (bt->trace_state == Blktrace_setup ||
                    bt->trace_state == Blktrace_stopped) {
                        blktrace_seq++;
                        smp_mb();
                        bt->trace_state = Blktrace_running;

                        trace_note_time(bt);
                        ret = 0;
                }
        } else {
                if (bt->trace_state == Blktrace_running) {
                        bt->trace_state = Blktrace_stopped;
                        relay_flush(bt->rchan);
                        ret = 0;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:       the block device
 * @cmd:        the ioctl cmd
 * @arg:        the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
        struct request_queue *q;
        int ret, start = 0;
        char b[BDEVNAME_SIZE];

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        mutex_lock(&bdev->bd_mutex);

        switch (cmd) {
        case BLKTRACESETUP:
                bdevname(bdev, b);
                ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
                break;
        case BLKTRACESTART:
                start = 1;
        case BLKTRACESTOP:
                ret = blk_trace_startstop(q, start);
                break;
        case BLKTRACETEARDOWN:
                ret = blk_trace_remove(q);
                break;
        default:
                ret = -ENOTTY;
                break;
        }

        mutex_unlock(&bdev->bd_mutex);
        return ret;
}

/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
        if (q->blk_trace) {
                blk_trace_startstop(q, 0);
                blk_trace_remove(q);
        }
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:          queue the io is for
 * @rq:         the source request
 * @what:       the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
                                    u32 what)
{
        struct blk_trace *bt = q->blk_trace;
        int rw = rq->cmd_flags & 0x03;

        if (likely(!bt))
                return;

        if (blk_discard_rq(rq))
                rw |= (1 << BIO_RW_DISCARD);

        if (blk_pc_request(rq)) {
                what |= BLK_TC_ACT(BLK_TC_PC);
                __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
                                what, rq->errors, rq->cmd_len, rq->cmd);
        } else  {
                what |= BLK_TC_ACT(BLK_TC_FS);
                __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
                                what, rq->errors, 0, NULL);
        }
}

static void blk_add_trace_rq_abort(void *ignore,
                                   struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(void *ignore,
                                    struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(void *ignore,
                                   struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(void *ignore,
                                     struct request_queue *q,
                                     struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(void *ignore,
                                      struct request_queue *q,
                                      struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:          queue the io is for
 * @bio:        the source bio
 * @what:       the action
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
                                     u32 what)
{
        struct blk_trace *bt = q->blk_trace;

        if (likely(!bt))
                return;

        __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
                        !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}

static void blk_add_trace_bio_bounce(void *ignore,
                                     struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
}

static void blk_add_trace_bio_complete(void *ignore,
                                       struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
}

static void blk_add_trace_bio_backmerge(void *ignore,
                                        struct request_queue *q,
                                        struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
}

static void blk_add_trace_bio_frontmerge(void *ignore,
                                         struct request_queue *q,
                                         struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
}

static void blk_add_trace_bio_queue(void *ignore,
                                    struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
}

static void blk_add_trace_getrq(void *ignore,
                                struct request_queue *q,
                                struct bio *bio, int rw)
{
        if (bio)
                blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
        else {
                struct blk_trace *bt = q->blk_trace;

                if (bt)
                        __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
        }
}


static void blk_add_trace_sleeprq(void *ignore,
                                  struct request_queue *q,
                                  struct bio *bio, int rw)
{
        if (bio)
                blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
        else {
                struct blk_trace *bt = q->blk_trace;

                if (bt)
                        __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
                                        0, 0, NULL);
        }
}

static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt)
                __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}

static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
                __be64 rpdu = cpu_to_be64(pdu);

                __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
                                sizeof(rpdu), &rpdu);
        }
}

static void blk_add_trace_unplug_timer(void *ignore, struct request_queue *q)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
                __be64 rpdu = cpu_to_be64(pdu);

                __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
                                sizeof(rpdu), &rpdu);
        }
}

static void blk_add_trace_split(void *ignore,
                                struct request_queue *q, struct bio *bio,
                                unsigned int pdu)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                __be64 rpdu = cpu_to_be64(pdu);

                __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
                                BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
                                sizeof(rpdu), &rpdu);
        }
}

/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @ignore:     trace callback data parameter (not used)
 * @q:          queue the io is for
 * @bio:        the source bio
 * @dev:        target device
 * @from:       source sector
 *
 * Description:
 *     Device mapper or raid targets sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_remap(void *ignore,
                                struct request_queue *q, struct bio *bio,
                                dev_t dev, sector_t from)
{
        struct blk_trace *bt = q->blk_trace;
        struct blk_io_trace_remap r;

        if (likely(!bt))
                return;

        r.device_from = cpu_to_be32(dev);
        r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
        r.sector_from = cpu_to_be64(from);

        __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
                        BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
                        sizeof(r), &r);
}

/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:     trace callback data parameter (not used)
 * @q:          queue the io is for
 * @rq:         the source request
 * @dev:        target device
 * @from:       source sector
 *
 * Description:
 *     Device mapper remaps requests to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore,
                                   struct request_queue *q,
                                   struct request *rq, dev_t dev,
                                   sector_t from)
{
        struct blk_trace *bt = q->blk_trace;
        struct blk_io_trace_remap r;

        if (likely(!bt))
                return;

        r.device_from = cpu_to_be32(dev);
        r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
        r.sector_from = cpu_to_be64(from);

        __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
                        rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
                        sizeof(r), &r);
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:          queue the io is for
 * @rq:         io request
 * @data:       driver-specific data
 * @len:        length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
                         struct request *rq,
                         void *data, size_t len)
{
        struct blk_trace *bt = q->blk_trace;

        if (likely(!bt))
                return;

        if (blk_pc_request(rq))
                __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
                                BLK_TA_DRV_DATA, rq->errors, len, data);
        else
                __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
                                BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
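/*
 * Example (illustrative; the struct and its fields are hypothetical):
 * a driver wanting per-request payloads in the trace might do:
 *
 *        struct my_drv_pdu {
 *                __be32 retries;
 *                __be32 hw_queue;
 *        } pdu = {
 *                .retries  = cpu_to_be32(retries),
 *                .hw_queue = cpu_to_be32(queue_nr),
 *        };
 *
 *        blk_add_driver_data(q, rq, &pdu, sizeof(pdu));
 *
 * The bytes are copied verbatim as the pdu of a BLK_TA_DRV_DATA event,
 * so fixed-endian fields keep the dump decodable on any host.
 */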

static void blk_register_tracepoints(void)
{
        int ret;

        ret = register_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);
        WARN_ON(ret);
        ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
        WARN_ON(ret);
        ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
        WARN_ON(ret);
        ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
        WARN_ON(ret);
        ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
        WARN_ON(ret);
        ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
        WARN_ON(ret);
        ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
        WARN_ON(ret);
        ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
        WARN_ON(ret);
        ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
        WARN_ON(ret);
        ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
        WARN_ON(ret);
        ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
        WARN_ON(ret);
        ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
        WARN_ON(ret);
        ret = register_trace_block_plug(blk_add_trace_plug, NULL);
        WARN_ON(ret);
        ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
        WARN_ON(ret);
        ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
        WARN_ON(ret);
        ret = register_trace_block_split(blk_add_trace_split, NULL);
        WARN_ON(ret);
        ret = register_trace_block_remap(blk_add_trace_remap, NULL);
        WARN_ON(ret);
        ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
        WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
        unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
        unregister_trace_block_remap(blk_add_trace_remap, NULL);
        unregister_trace_block_split(blk_add_trace_split, NULL);
        unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
        unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
        unregister_trace_block_plug(blk_add_trace_plug, NULL);
        unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
        unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
        unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
        unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
        unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
        unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
        unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
        unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
        unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
        unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
        unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
        unregister_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);

        tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */

static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
        int i = 0;
        int tc = t->action >> BLK_TC_SHIFT;

        if (t->action == BLK_TN_MESSAGE) {
                rwbs[i++] = 'N';
                goto out;
        }

        if (tc & BLK_TC_DISCARD)
                rwbs[i++] = 'D';
        else if (tc & BLK_TC_WRITE)
                rwbs[i++] = 'W';
        else if (t->bytes)
                rwbs[i++] = 'R';
        else
                rwbs[i++] = 'N';

        if (tc & BLK_TC_AHEAD)
                rwbs[i++] = 'A';
        if (tc & BLK_TC_BARRIER)
                rwbs[i++] = 'B';
        if (tc & BLK_TC_SYNC)
                rwbs[i++] = 'S';
        if (tc & BLK_TC_META)
                rwbs[i++] = 'M';
out:
        rwbs[i] = '\0';
}

static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
        return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent) + 1;
}

static inline u32 t_action(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent)
{
        const __u64 *val = pdu_start(ent);
        return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
                          struct blk_io_trace_remap *r)
{
        const struct blk_io_trace_remap *__r = pdu_start(ent);
        __u64 sector_from = __r->sector_from;

        r->device_from = be32_to_cpu(__r->device_from);
        r->device_to   = be32_to_cpu(__r->device_to);
        r->sector_from = be64_to_cpu(sector_from);
}

typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);

static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
{
        char rwbs[6];
        unsigned long long ts  = iter->ts;
        unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
        unsigned secs          = (unsigned long)ts;
        const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

        fill_rwbs(rwbs, t);

        return trace_seq_printf(&iter->seq,
                                "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
                                MAJOR(t->device), MINOR(t->device), iter->cpu,
                                secs, nsec_rem, iter->ent->pid, act, rwbs);
}
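/*
 * Example output (illustrative; exact spacing follows the format string
 * above): a queued 8-sector read on (8,0) might render in classic mode as
 *
 *          8,0    1     0.000123456  4813  Q   R ...
 *
 * i.e. major,minor, cpu, seconds.nanoseconds, pid, the action string
 * looked up in what2act below, and the rwbs flags from fill_rwbs().
 */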

static int blk_log_action(struct trace_iterator *iter, const char *act)
{
        char rwbs[6];
        const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

        fill_rwbs(rwbs, t);
        return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
                                MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
{
        const unsigned char *pdu_buf;
        int pdu_len;
        int i, end, ret;

        pdu_buf = pdu_start(ent);
        pdu_len = te_blk_io_trace(ent)->pdu_len;

        if (!pdu_len)
                return 1;

        /* find the last zero that needs to be printed */
        for (end = pdu_len - 1; end >= 0; end--)
                if (pdu_buf[end])
                        break;
        end++;

        if (!trace_seq_putc(s, '('))
                return 0;

        for (i = 0; i < pdu_len; i++) {

                ret = trace_seq_printf(s, "%s%02x",
                                       i == 0 ? "" : " ", pdu_buf[i]);
                if (!ret)
                        return ret;

                /*
                 * stop when the rest is just zeroes and indicate so
                 * with a ".." appended
                 */
                if (i == end && end != pdu_len - 1)
                        return trace_seq_puts(s, " ..) ");
        }

        return trace_seq_puts(s, ") ");
}

static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
                int ret;

                ret = trace_seq_printf(s, "%u ", t_bytes(ent));
                if (!ret)
                        return 0;
                ret = blk_log_dump_pdu(s, ent);
                if (!ret)
                        return 0;
                return trace_seq_printf(s, "[%s]\n", cmd);
        } else {
                if (t_sec(ent))
                        return trace_seq_printf(s, "%llu + %u [%s]\n",
                                                t_sector(ent), t_sec(ent), cmd);
                return trace_seq_printf(s, "[%s]\n", cmd);
        }
}

static int blk_log_with_error(struct trace_seq *s,
                              const struct trace_entry *ent)
{
        if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
                int ret;

                ret = blk_log_dump_pdu(s, ent);
                if (ret)
                        return trace_seq_printf(s, "[%d]\n", t_error(ent));
                return 0;
        } else {
                if (t_sec(ent))
                        return trace_seq_printf(s, "%llu + %u [%d]\n",
                                                t_sector(ent),
                                                t_sec(ent), t_error(ent));
                return trace_seq_printf(s, "%llu [%d]\n",
                                        t_sector(ent), t_error(ent));
        }
}

static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
{
        struct blk_io_trace_remap r = { .device_from = 0, };

        get_pdu_remap(ent, &r);
        return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
                                t_sector(ent), t_sec(ent),
                                MAJOR(r.device_from), MINOR(r.device_from),
                                (unsigned long long)r.sector_from);
}

static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        return trace_seq_printf(s, "[%s]\n", cmd);
}

static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
}

static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
                                get_pdu_int(ent), cmd);
}

static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
{
        int ret;
        const struct blk_io_trace *t = te_blk_io_trace(ent);

        ret = trace_seq_putmem(s, t + 1, t->pdu_len);
        if (ret)
                return trace_seq_putc(s, '\n');
        return ret;
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
        if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
                return;
        seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
                    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
        blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
        blk_tr = tr;
        blk_tracer_start(tr);
        return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
        blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
        blk_tracer_stop(tr);
}

static const struct {
        const char *act[2];
        int        (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] = {
        [__BLK_TA_QUEUE]        = {{  "Q", "queue" },      blk_log_generic },
        [__BLK_TA_BACKMERGE]    = {{  "M", "backmerge" },  blk_log_generic },
        [__BLK_TA_FRONTMERGE]   = {{  "F", "frontmerge" }, blk_log_generic },
        [__BLK_TA_GETRQ]        = {{  "G", "getrq" },      blk_log_generic },
        [__BLK_TA_SLEEPRQ]      = {{  "S", "sleeprq" },    blk_log_generic },
        [__BLK_TA_REQUEUE]      = {{  "R", "requeue" },    blk_log_with_error },
        [__BLK_TA_ISSUE]        = {{  "D", "issue" },      blk_log_generic },
        [__BLK_TA_COMPLETE]     = {{  "C", "complete" },   blk_log_with_error },
        [__BLK_TA_PLUG]         = {{  "P", "plug" },       blk_log_plug },
        [__BLK_TA_UNPLUG_IO]    = {{  "U", "unplug_io" },  blk_log_unplug },
        [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
        [__BLK_TA_INSERT]       = {{  "I", "insert" },     blk_log_generic },
        [__BLK_TA_SPLIT]        = {{  "X", "split" },      blk_log_split },
        [__BLK_TA_BOUNCE]       = {{  "B", "bounce" },     blk_log_generic },
        [__BLK_TA_REMAP]        = {{  "A", "remap" },      blk_log_remap },
};

static enum print_line_t print_one_line(struct trace_iterator *iter,
                                        bool classic)
{
        struct trace_seq *s = &iter->seq;
        const struct blk_io_trace *t;
        u16 what;
        int ret;
        bool long_act;
        blk_log_action_t *log_action;

        t          = te_blk_io_trace(iter->ent);
        what       = t->action & ((1 << BLK_TC_SHIFT) - 1);
        long_act   = !!(trace_flags & TRACE_ITER_VERBOSE);
        log_action = classic ? &blk_log_action_classic : &blk_log_action;

        if (t->action == BLK_TN_MESSAGE) {
                ret = log_action(iter, long_act ? "message" : "m");
                if (ret)
                        ret = blk_log_msg(s, iter->ent);
                goto out;
        }

        if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
                ret = trace_seq_printf(s, "Unknown action %x\n", what);
        else {
                ret = log_action(iter, what2act[what].act[long_act]);
                if (ret)
                        ret = what2act[what].print(s, iter->ent);
        }
out:
        return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
                                               int flags, struct trace_event *event)
{
        return print_one_line(iter, false);
}

static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
        const int offset = offsetof(struct blk_io_trace, sector);
        struct blk_io_trace old = {
                .magic    = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
                .time     = iter->ts,
        };

        if (!trace_seq_putmem(s, &old, offset))
                return 0;
        return trace_seq_putmem(s, &t->sector,
                                sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
                             struct trace_event *event)
{
        return blk_trace_synthesize_old_trace(iter) ?
                        TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
        if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
                return TRACE_TYPE_UNHANDLED;

        return print_one_line(iter, true);
}

static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
{
        /* don't output context-info for blk_classic output */
        if (bit == TRACE_BLK_OPT_CLASSIC) {
                if (set)
                        trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
                else
                        trace_flags |= TRACE_ITER_CONTEXT_INFO;
        }
        return 0;
}

static struct tracer blk_tracer __read_mostly = {
        .name           = "blk",
        .init           = blk_tracer_init,
        .reset          = blk_tracer_reset,
        .start          = blk_tracer_start,
        .stop           = blk_tracer_stop,
        .print_header   = blk_tracer_print_header,
        .print_line     = blk_tracer_print_line,
        .flags          = &blk_tracer_flags,
        .set_flag       = blk_tracer_set_flag,
};

static struct trace_event_functions trace_blk_event_funcs = {
        .trace          = blk_trace_event_print,
        .binary         = blk_trace_event_print_binary,
};

static struct trace_event trace_blk_event = {
        .type           = TRACE_BLK,
        .funcs          = &trace_blk_event_funcs,
};

static int __init init_blk_tracer(void)
{
        if (!register_ftrace_event(&trace_blk_event)) {
                pr_warning("Warning: could not register block events\n");
                return 1;
        }

        if (register_tracer(&blk_tracer) != 0) {
                pr_warning("Warning: could not register the block tracer\n");
                unregister_ftrace_event(&trace_blk_event);
                return 1;
        }

        return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
        struct blk_trace *bt;

        bt = xchg(&q->blk_trace, NULL);
        if (bt == NULL)
                return -EINVAL;

        if (atomic_dec_and_test(&blk_probes_ref))
                blk_unregister_tracepoints();

        blk_trace_free(bt);
        return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
                                 struct block_device *bdev)
{
        struct blk_trace *old_bt, *bt = NULL;
        int ret = -ENOMEM;

        bt = kzalloc(sizeof(*bt), GFP_KERNEL);
        if (!bt)
                return -ENOMEM;

        bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
        if (!bt->msg_data)
                goto free_bt;

        bt->dev = bdev->bd_dev;
        bt->act_mask = (u16)-1;

        blk_trace_setup_lba(bt, bdev);

        old_bt = xchg(&q->blk_trace, bt);
        if (old_bt != NULL) {
                (void)xchg(&q->blk_trace, old_bt);
                ret = -EBUSY;
                goto free_bt;
        }

        if (atomic_inc_return(&blk_probes_ref) == 1)
                blk_register_tracepoints();
        return 0;

free_bt:
        blk_trace_free(bt);
        return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
        DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
                    sysfs_blk_trace_attr_show, \
                    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
        &dev_attr_enable.attr,
        &dev_attr_act_mask.attr,
        &dev_attr_pid.attr,
        &dev_attr_start_lba.attr,
        &dev_attr_end_lba.attr,
        NULL
};

struct attribute_group blk_trace_attr_group = {
        .name  = "trace",
        .attrs = blk_trace_attrs,
};

static const struct {
        int mask;
        const char *str;
} mask_maps[] = {
        { BLK_TC_READ,          "read"          },
        { BLK_TC_WRITE,         "write"         },
        { BLK_TC_BARRIER,       "barrier"       },
        { BLK_TC_SYNC,          "sync"          },
        { BLK_TC_QUEUE,         "queue"         },
        { BLK_TC_REQUEUE,       "requeue"       },
        { BLK_TC_ISSUE,         "issue"         },
        { BLK_TC_COMPLETE,      "complete"      },
        { BLK_TC_FS,            "fs"            },
        { BLK_TC_PC,            "pc"            },
        { BLK_TC_AHEAD,         "ahead"         },
        { BLK_TC_META,          "meta"          },
        { BLK_TC_DISCARD,       "discard"       },
        { BLK_TC_DRV_DATA,      "drv_data"      },
};

static int blk_trace_str2mask(const char *str)
{
        int i;
        int mask = 0;
        char *buf, *s, *token;

        buf = kstrdup(str, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;
        s = strstrip(buf);

        while (1) {
                token = strsep(&s, ",");
                if (token == NULL)
                        break;

                if (*token == '\0')
                        continue;

                for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
                        if (strcasecmp(token, mask_maps[i].str) == 0) {
                                mask |= mask_maps[i].mask;
                                break;
                        }
                }
                if (i == ARRAY_SIZE(mask_maps)) {
                        mask = -EINVAL;
                        break;
                }
        }
        kfree(buf);

        return mask;
}
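/*
 * Example (illustrative): with the mask_maps table above, writing
 * "read,write,sync" to the act_mask attribute parses to
 * BLK_TC_READ | BLK_TC_WRITE | BLK_TC_SYNC; matching is case-insensitive
 * and empty tokens between commas are skipped, while any unknown token
 * fails the whole parse with -EINVAL.
 */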

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
        int i;
        char *p = buf;

        for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
                if (mask & mask_maps[i].mask) {
                        p += sprintf(p, "%s%s",
                                    (p == buf) ? "" : ",", mask_maps[i].str);
                }
        }
        *p++ = '\n';

        return p - buf;
}

static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
        if (bdev->bd_disk == NULL)
                return NULL;

        return bdev_get_queue(bdev);
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct hd_struct *p = dev_to_part(dev);
        struct request_queue *q;
        struct block_device *bdev;
        ssize_t ret = -ENXIO;

        lock_kernel();
        bdev = bdget(part_devt(p));
        if (bdev == NULL)
                goto out_unlock_kernel;

        q = blk_trace_get_queue(bdev);
        if (q == NULL)
                goto out_bdput;

        mutex_lock(&bdev->bd_mutex);

        if (attr == &dev_attr_enable) {
                ret = sprintf(buf, "%u\n", !!q->blk_trace);
                goto out_unlock_bdev;
        }

        if (q->blk_trace == NULL)
                ret = sprintf(buf, "disabled\n");
        else if (attr == &dev_attr_act_mask)
                ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
        else if (attr == &dev_attr_pid)
                ret = sprintf(buf, "%u\n", q->blk_trace->pid);
        else if (attr == &dev_attr_start_lba)
                ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
        else if (attr == &dev_attr_end_lba)
                ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
        mutex_unlock(&bdev->bd_mutex);
out_bdput:
        bdput(bdev);
out_unlock_kernel:
        unlock_kernel();
        return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count)
{
        struct block_device *bdev;
        struct request_queue *q;
        struct hd_struct *p;
        u64 value;
        ssize_t ret = -EINVAL;

        if (count == 0)
                goto out;

        if (attr == &dev_attr_act_mask) {
                if (sscanf(buf, "%llx", &value) != 1) {
                        /* Assume it is a list of trace category names */
                        ret = blk_trace_str2mask(buf);
                        if (ret < 0)
                                goto out;
                        value = ret;
                }
        } else if (sscanf(buf, "%llu", &value) != 1)
                goto out;

        ret = -ENXIO;

        lock_kernel();
        p = dev_to_part(dev);
        bdev = bdget(part_devt(p));
        if (bdev == NULL)
                goto out_unlock_kernel;

        q = blk_trace_get_queue(bdev);
        if (q == NULL)
                goto out_bdput;

        mutex_lock(&bdev->bd_mutex);

        if (attr == &dev_attr_enable) {
                if (value)
                        ret = blk_trace_setup_queue(q, bdev);
                else
                        ret = blk_trace_remove_queue(q);
                goto out_unlock_bdev;
        }

        ret = 0;
        if (q->blk_trace == NULL)
                ret = blk_trace_setup_queue(q, bdev);

        if (ret == 0) {
                if (attr == &dev_attr_act_mask)
                        q->blk_trace->act_mask = value;
                else if (attr == &dev_attr_pid)
                        q->blk_trace->pid = value;
                else if (attr == &dev_attr_start_lba)
                        q->blk_trace->start_lba = value;
                else if (attr == &dev_attr_end_lba)
                        q->blk_trace->end_lba = value;
        }

out_unlock_bdev:
        mutex_unlock(&bdev->bd_mutex);
out_bdput:
        bdput(bdev);
out_unlock_kernel:
        unlock_kernel();
out:
        return ret ? ret : count;
}

int blk_trace_init_sysfs(struct device *dev)
{
        return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

void blk_trace_remove_sysfs(struct device *dev)
{
        sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
}

#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_EVENT_TRACING

void blk_dump_cmd(char *buf, struct request *rq)
{
        int i, end;
        int len = rq->cmd_len;
        unsigned char *cmd = rq->cmd;

        if (!blk_pc_request(rq)) {
                buf[0] = '\0';
                return;
        }

        for (end = len - 1; end >= 0; end--)
                if (cmd[end])
                        break;
        end++;

        for (i = 0; i < len; i++) {
                buf += sprintf(buf, "%s%02x", i == 0 ? "" : " ", cmd[i]);
                if (i == end && end != len - 1) {
                        sprintf(buf, " ..");
                        break;
                }
        }
}

void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
{
        int i = 0;

        if (rw & WRITE)
                rwbs[i++] = 'W';
        else if (rw & 1 << BIO_RW_DISCARD)
                rwbs[i++] = 'D';
        else if (bytes)
                rwbs[i++] = 'R';
        else
                rwbs[i++] = 'N';

        if (rw & 1 << BIO_RW_AHEAD)
                rwbs[i++] = 'A';
        if (rw & 1 << BIO_RW_BARRIER)
                rwbs[i++] = 'B';
        if (rw & 1 << BIO_RW_SYNCIO)
                rwbs[i++] = 'S';
        if (rw & 1 << BIO_RW_META)
                rwbs[i++] = 'M';

        rwbs[i] = '\0';
}
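/*
 * Example (illustrative): blk_fill_rwbs(rwbs, WRITE | (1 << BIO_RW_SYNCIO),
 * 4096) yields "WS"; a plain 4096-byte read yields "R"; a zero-byte,
 * flag-free value degenerates to "N". The buffer must hold at least six
 * bytes: one direction character, up to four flag characters and the
 * terminating NUL.
 */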

void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
{
        int rw = rq->cmd_flags & 0x03;
        int bytes;

        if (blk_discard_rq(rq))
                rw |= (1 << BIO_RW_DISCARD);

        bytes = blk_rq_bytes(rq);

        blk_fill_rwbs(rwbs, rw, bytes);
}

#endif /* CONFIG_EVENT_TRACING */