diff options
Diffstat (limited to 'driver/gator_events_block.c')
-rw-r--r-- | driver/gator_events_block.c | 70 |
1 file changed, 27 insertions, 43 deletions
diff --git a/driver/gator_events_block.c b/driver/gator_events_block.c index a8b8114..b18c3ca 100644 --- a/driver/gator_events_block.c +++ b/driver/gator_events_block.c | |||
@@ -25,15 +25,13 @@ static ulong block_rq_wr_enabled; | |||
25 | static ulong block_rq_rd_enabled; | 25 | static ulong block_rq_rd_enabled; |
26 | static ulong block_rq_wr_key; | 26 | static ulong block_rq_wr_key; |
27 | static ulong block_rq_rd_key; | 27 | static ulong block_rq_rd_key; |
28 | static DEFINE_PER_CPU(int[BLOCK_TOTAL], blockCnt); | 28 | static atomic_t blockCnt[BLOCK_TOTAL]; |
29 | static DEFINE_PER_CPU(int[BLOCK_TOTAL * 4], blockGet); | 29 | static int blockGet[BLOCK_TOTAL * 4]; |
30 | static DEFINE_PER_CPU(bool, new_data_avail); | ||
31 | 30 | ||
32 | GATOR_DEFINE_PROBE(block_rq_complete, TP_PROTO(struct request_queue *q, struct request *rq)) | 31 | GATOR_DEFINE_PROBE(block_rq_complete, TP_PROTO(struct request_queue *q, struct request *rq)) |
33 | { | 32 | { |
34 | unsigned long flags; | 33 | unsigned long flags; |
35 | int write, size; | 34 | int write, size; |
36 | int cpu = smp_processor_id(); | ||
37 | 35 | ||
38 | if (!rq) | 36 | if (!rq) |
39 | return; | 37 | return; |
@@ -47,13 +45,16 @@ GATOR_DEFINE_PROBE(block_rq_complete, TP_PROTO(struct request_queue *q, struct r | |||
47 | // disable interrupts to synchronize with gator_events_block_read() | 45 | // disable interrupts to synchronize with gator_events_block_read() |
48 | // spinlocks not needed since percpu buffers are used | 46 | // spinlocks not needed since percpu buffers are used |
49 | local_irq_save(flags); | 47 | local_irq_save(flags); |
50 | if (write) | 48 | if (write) { |
51 | per_cpu(blockCnt, cpu)[BLOCK_RQ_WR] += size; | 49 | if (block_rq_wr_enabled) { |
52 | else | 50 | atomic_add(size, &blockCnt[BLOCK_RQ_WR]); |
53 | per_cpu(blockCnt, cpu)[BLOCK_RQ_RD] += size; | 51 | } |
52 | } else { | ||
53 | if (block_rq_rd_enabled) { | ||
54 | atomic_add(size, &blockCnt[BLOCK_RQ_RD]); | ||
55 | } | ||
56 | } | ||
54 | local_irq_restore(flags); | 57 | local_irq_restore(flags); |
55 | |||
56 | per_cpu(new_data_avail, cpu) = true; | ||
57 | } | 58 | } |
58 | 59 | ||
59 | static int gator_events_block_create_files(struct super_block *sb, struct dentry *root) | 60 | static int gator_events_block_create_files(struct super_block *sb, struct dentry *root) |
@@ -81,11 +82,6 @@ static int gator_events_block_create_files(struct super_block *sb, struct dentry | |||
81 | 82 | ||
82 | static int gator_events_block_start(void) | 83 | static int gator_events_block_start(void) |
83 | { | 84 | { |
84 | int cpu; | ||
85 | |||
86 | for_each_present_cpu(cpu) | ||
87 | per_cpu(new_data_avail, cpu) = true; | ||
88 | |||
89 | // register tracepoints | 85 | // register tracepoints |
90 | if (block_rq_wr_enabled || block_rq_rd_enabled) | 86 | if (block_rq_wr_enabled || block_rq_rd_enabled) |
91 | if (GATOR_REGISTER_TRACE(block_rq_complete)) | 87 | if (GATOR_REGISTER_TRACE(block_rq_complete)) |
@@ -113,44 +109,32 @@ static void gator_events_block_stop(void) | |||
113 | 109 | ||
114 | static int gator_events_block_read(int **buffer) | 110 | static int gator_events_block_read(int **buffer) |
115 | { | 111 | { |
116 | unsigned long flags; | 112 | int len, value, data = 0; |
117 | int len, value, cpu, data = 0; | ||
118 | cpu = smp_processor_id(); | ||
119 | 113 | ||
120 | if (per_cpu(new_data_avail, cpu) == false) | 114 | if (smp_processor_id() != 0) { |
121 | return 0; | 115 | return 0; |
122 | 116 | } | |
123 | per_cpu(new_data_avail, cpu) = false; | ||
124 | 117 | ||
125 | len = 0; | 118 | len = 0; |
126 | if (block_rq_wr_enabled) { | 119 | if (block_rq_wr_enabled && (value = atomic_read(&blockCnt[BLOCK_RQ_WR])) > 0) { |
127 | local_irq_save(flags); | 120 | atomic_sub(value, &blockCnt[BLOCK_RQ_WR]); |
128 | value = per_cpu(blockCnt, cpu)[BLOCK_RQ_WR]; | 121 | blockGet[len++] = block_rq_wr_key; |
129 | per_cpu(blockCnt, cpu)[BLOCK_RQ_WR] = 0; | 122 | blockGet[len++] = 0; // indicates to Streamline that value bytes were written now, not since the last message |
130 | local_irq_restore(flags); | 123 | blockGet[len++] = block_rq_wr_key; |
131 | per_cpu(blockGet, cpu)[len++] = block_rq_wr_key; | 124 | blockGet[len++] = value; |
132 | per_cpu(blockGet, cpu)[len++] = 0; // indicates to Streamline that value bytes were written now, not since the last message | ||
133 | per_cpu(blockGet, cpu)[len++] = block_rq_wr_key; | ||
134 | per_cpu(blockGet, cpu)[len++] = value; | ||
135 | data += value; | 125 | data += value; |
136 | } | 126 | } |
137 | if (block_rq_rd_enabled) { | 127 | if (block_rq_rd_enabled && (value = atomic_read(&blockCnt[BLOCK_RQ_RD])) > 0) { |
138 | local_irq_save(flags); | 128 | atomic_sub(value, &blockCnt[BLOCK_RQ_RD]); |
139 | value = per_cpu(blockCnt, cpu)[BLOCK_RQ_RD]; | 129 | blockGet[len++] = block_rq_rd_key; |
140 | per_cpu(blockCnt, cpu)[BLOCK_RQ_RD] = 0; | 130 | blockGet[len++] = 0; // indicates to Streamline that value bytes were read now, not since the last message |
141 | local_irq_restore(flags); | 131 | blockGet[len++] = block_rq_rd_key; |
142 | per_cpu(blockGet, cpu)[len++] = block_rq_rd_key; | 132 | blockGet[len++] = value; |
143 | per_cpu(blockGet, cpu)[len++] = 0; // indicates to Streamline that value bytes were read now, not since the last message | ||
144 | per_cpu(blockGet, cpu)[len++] = block_rq_rd_key; | ||
145 | per_cpu(blockGet, cpu)[len++] = value; | ||
146 | data += value; | 133 | data += value; |
147 | } | 134 | } |
148 | 135 | ||
149 | if (data != 0) | ||
150 | per_cpu(new_data_avail, cpu) = true; | ||
151 | |||
152 | if (buffer) | 136 | if (buffer) |
153 | *buffer = per_cpu(blockGet, cpu); | 137 | *buffer = blockGet; |
154 | 138 | ||
155 | return len; | 139 | return len; |
156 | } | 140 | } |