git://ftp.safe.ca
/
safe
/
jmp
/
linux-2.6
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Merge commit 'v2.6.32-rc6' into perf/core
[safe/jmp/linux-2.6]
/
kernel
/
trace
/
ring_buffer.c
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 1766c0e..63446f1 100644 (file)
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -201,8 +201,6 @@ int tracing_is_on(void)
}
EXPORT_SYMBOL_GPL(tracing_is_on);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
-#include "trace.h"
-
#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT 4U
#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT 4U
#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
@@ -399,18 +397,21 @@ int ring_buffer_print_page_header(struct trace_seq *s)
int ret;
ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
int ret;
ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
- "offset:0;\tsize:%u;\n",
- (unsigned int)sizeof(field.time_stamp));
+ "offset:0;\tsize:%u;\tsigned:%u;\n",
+ (unsigned int)sizeof(field.time_stamp),
+ (unsigned int)is_signed_type(u64));
ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
- "offset:%u;\tsize:%u;\n",
+ "offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), commit),
(unsigned int)offsetof(typeof(field), commit),
- (unsigned int)sizeof(field.commit));
+ (unsigned int)sizeof(field.commit),
+ (unsigned int)is_signed_type(long));
ret = trace_seq_printf(s, "\tfield: char data;\t"
ret = trace_seq_printf(s, "\tfield: char data;\t"
- "offset:%u;\tsize:%u;\n",
+ "offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), data),
(unsigned int)offsetof(typeof(field), data),
- (unsigned int)BUF_PAGE_SIZE);
+ (unsigned int)BUF_PAGE_SIZE,
+ (unsigned int)is_signed_type(char));
return ret;
}
return ret;
}
@@ -485,7 +486,7 @@ struct ring_buffer_iter {
/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0
/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0
-static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
+static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
/* shift to debug/test normalization and TIME_EXTENTS */
return buffer->clock() << DEBUG_SHIFT;
{
/* shift to debug/test normalization and TIME_EXTENTS */
return buffer->clock() << DEBUG_SHIFT;
@@ -496,7 +497,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
u64 time;
preempt_disable_notrace();
u64 time;
preempt_disable_notrace();
- time = rb_time_stamp(buffer, cpu);
+ time = rb_time_stamp(buffer);
preempt_enable_no_resched_notrace();
return time;
preempt_enable_no_resched_notrace();
return time;
@@ -601,7 +602,7 @@ static struct list_head *rb_list_head(struct list_head *list)
}
/*
}
/*
- * rb_is_head_page - test if the give page is the head page
+ * rb_is_head_page - test if the given page is the head page
*
* Because the reader may move the head_page pointer, we can
* not trust what the head page is (it may be pointing to
*
* Because the reader may move the head_page pointer, we can
* not trust what the head page is (it may be pointing to
@@ -701,8 +702,8 @@ static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
val &= ~RB_FLAG_MASK;
val &= ~RB_FLAG_MASK;
- ret = (unsigned long)cmpxchg(&list->next,
- val | old_flag, val | new_flag);
+ ret = cmpxchg((unsigned long *)&list->next,
+ val | old_flag, val | new_flag);
/* check if the reader took the page */
if ((ret & ~RB_FLAG_MASK) != val)
/* check if the reader took the page */
if ((ret & ~RB_FLAG_MASK) != val)
@@ -794,7 +795,7 @@ static int rb_head_page_replace(struct buffer_page *old,
val = *ptr & ~RB_FLAG_MASK;
val |= RB_PAGE_HEAD;
val = *ptr & ~RB_FLAG_MASK;
val |= RB_PAGE_HEAD;
- ret = cmpxchg(ptr, val, &new->list);
+ ret = cmpxchg(ptr, val, (unsigned long)&new->list);
return ret == val;
}
return ret == val;
}
@@ -1870,7 +1871,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
* Nested commits always have zero deltas, so
* just reread the time stamp
*/
* Nested commits always have zero deltas, so
* just reread the time stamp
*/
- *ts = rb_time_stamp(buffer, cpu_buffer->cpu);
+ *ts = rb_time_stamp(buffer);
next_page->page->time_stamp = *ts;
}
next_page->page->time_stamp = *ts;
}
@@ -2084,6 +2085,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
rb_start_commit(cpu_buffer);
rb_start_commit(cpu_buffer);
+#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/*
* Due to the ability to swap a cpu buffer from a buffer
* it is possible it was swapped before we committed.
/*
* Due to the ability to swap a cpu buffer from a buffer
* it is possible it was swapped before we committed.
@@ -2096,6 +2098,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
local_dec(&cpu_buffer->commits);
return NULL;
}
local_dec(&cpu_buffer->commits);
return NULL;
}
+#endif
length = rb_calculate_event_length(length);
again:
length = rb_calculate_event_length(length);
again:
@@ -2111,7 +2114,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
goto out_fail;
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
goto out_fail;
- ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
+ ts = rb_time_stamp(cpu_buffer->buffer);
/*
* Only the first commit can update the timestamp.
/*
* Only the first commit can update the timestamp.
@@ -2681,7 +2684,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
EXPORT_SYMBOL_GPL(ring_buffer_entries);
/**
EXPORT_SYMBOL_GPL(ring_buffer_entries);
/**
- * ring_buffer_overrun_cpu - get the number of overruns in buffer
+ * ring_buffer_overruns - get the number of overruns in buffer
* @buffer: The ring buffer
*
* Returns the total number of overruns in the ring buffer
* @buffer: The ring buffer
*
* Returns the total number of overruns in the ring buffer
@@ -2995,15 +2998,12 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
}
static struct ring_buffer_event *
}
static struct ring_buffer_event *
-rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
{
{
- struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
struct buffer_page *reader;
int nr_loops = 0;
struct ring_buffer_event *event;
struct buffer_page *reader;
int nr_loops = 0;
- cpu_buffer = buffer->buffers[cpu];
-
again:
/*
* We repeat when a timestamp is encountered. It is possible
again:
/*
* We repeat when a timestamp is encountered. It is possible
@@ -3047,7 +3047,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
case RINGBUF_TYPE_DATA:
if (ts) {
*ts = cpu_buffer->read_stamp + event->time_delta;
case RINGBUF_TYPE_DATA:
if (ts) {
*ts = cpu_buffer->read_stamp + event->time_delta;
- ring_buffer_normalize_time_stamp(buffer,
+ ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
cpu_buffer->cpu, ts);
}
return event;
cpu_buffer->cpu, ts);
}
return event;
@@ -3166,7 +3166,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
local_irq_save(flags);
if (dolock)
spin_lock(&cpu_buffer->reader_lock);
local_irq_save(flags);
if (dolock)
spin_lock(&cpu_buffer->reader_lock);
- event = rb_buffer_peek(buffer, cpu, ts);
+ event = rb_buffer_peek(cpu_buffer, ts);
if (event && event->type_len == RINGBUF_TYPE_PADDING)
rb_advance_reader(cpu_buffer);
if (dolock)
if (event && event->type_len == RINGBUF_TYPE_PADDING)
rb_advance_reader(cpu_buffer);
if (dolock)
@@ -3235,7 +3235,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
if (dolock)
spin_lock(&cpu_buffer->reader_lock);
if (dolock)
spin_lock(&cpu_buffer->reader_lock);
- event = rb_buffer_peek(buffer, cpu, ts);
+ event = rb_buffer_peek(cpu_buffer, ts);
if (event)
rb_advance_reader(cpu_buffer);
if (event)
rb_advance_reader(cpu_buffer);
@@ -3498,6 +3498,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
+#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/**
* ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
* @buffer_a: One buffer to swap with
/**
* ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
* @buffer_a: One buffer to swap with
@@ -3573,6 +3574,7 @@ out:
return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
+#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
/**
* ring_buffer_alloc_read_page - allocate a page to read from buffer
/**
* ring_buffer_alloc_read_page - allocate a page to read from buffer