sysSentry/ebpf-fix-collect-iodump.patch
2024-10-12 11:18:17 +08:00

786 lines
28 KiB
Diff

From 4b64893261f70e4136ac8f37b7fa396492b674c4 Mon Sep 17 00:00:00 2001
From: zhangnan <zhangnan134@huawei.com>
Date: Sat, 12 Oct 2024 11:24:32 +0800
Subject: [PATCH] ebpf fix collect iodump
---
src/c/ebpf_collector/ebpf_collector.bpf.c | 292 +++++++++++-----------
src/c/ebpf_collector/ebpf_collector.c | 51 +++-
src/c/ebpf_collector/ebpf_collector.h | 18 +-
3 files changed, 200 insertions(+), 161 deletions(-)
diff --git a/src/c/ebpf_collector/ebpf_collector.bpf.c b/src/c/ebpf_collector/ebpf_collector.bpf.c
index 28cdde2..870a677 100644
--- a/src/c/ebpf_collector/ebpf_collector.bpf.c
+++ b/src/c/ebpf_collector/ebpf_collector.bpf.c
@@ -92,6 +92,35 @@ struct bpf_map_def SEC("maps") tag_args = {
.max_entries = 1000,
};
+struct bpf_map_def SEC("maps") blk_res_2 = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(u64),
+ .value_size = sizeof(struct time_range_io_count),
+ .max_entries = MAX_IO_TIME,
+};
+
+struct bpf_map_def SEC("maps") bio_res_2 = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(u64),
+ .value_size = sizeof(struct time_range_io_count),
+ .max_entries = MAX_IO_TIME,
+};
+
+struct bpf_map_def SEC("maps") wbt_res_2 = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(u64),
+ .value_size = sizeof(struct time_range_io_count),
+ .max_entries = MAX_IO_TIME,
+};
+
+struct bpf_map_def SEC("maps") tag_res_2 = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(u64),
+ .value_size = sizeof(struct time_range_io_count),
+ .max_entries = MAX_IO_TIME,
+};
+
+
struct blk_mq_alloc_data {
/* input parameter */
struct request_queue *q;
@@ -148,39 +177,12 @@ static __always_inline void blk_fill_rwbs(char *rwbs, unsigned int op)
}
}
-void update_new_data_in_start(struct stage_data *new_data, struct update_params *params) {
- blk_fill_rwbs(new_data->io_type, params->cmd_flags);
- if (new_data->bucket[params->update_bucket].start_range == params->curr_start_range){
- new_data->bucket[params->update_bucket].io_count += 1;
- } else {
- new_data->bucket[MAX_BUCKETS].io_count += new_data->bucket[params->update_bucket].io_count;
- new_data->bucket[params->update_bucket].io_count = 1;
- new_data->bucket[params->update_bucket].start_range = params->curr_start_range;
- }
-}
-
void update_curr_data_in_start(struct stage_data *curr_data, struct update_params *params) {
if (curr_data && params) {
curr_data->start_count += 1;
curr_data->major = params->major;
curr_data->first_minor = params->first_minor;
blk_fill_rwbs(curr_data->io_type, params->cmd_flags);
- if (curr_data->bucket[params->update_bucket].start_range == params->curr_start_range) {
- curr_data->bucket[params->update_bucket].io_count += 1;
- } else {
- curr_data->bucket[MAX_BUCKETS].io_count += curr_data->bucket[params->update_bucket].io_count;
- curr_data->bucket[params->update_bucket].io_count = 1;
- }
- curr_data->bucket[params->update_bucket].start_range = params->curr_start_range;
- }
-}
-
-void update_new_data_in_finish(struct stage_data *new_data, struct update_params *params) {
- blk_fill_rwbs(new_data->io_type, params->cmd_flags);
- if (new_data->bucket[params->update_bucket].start_range == params->curr_start_range){
- new_data->bucket[params->update_bucket].io_count = (new_data->bucket[params->update_bucket].io_count > 1) ? new_data->bucket[params->update_bucket].io_count - 1 : 0;
- } else {
- new_data->bucket[MAX_BUCKETS].io_count = (new_data->bucket[MAX_BUCKETS].io_count > 1) ? new_data->bucket[MAX_BUCKETS].io_count - 1 : 0;
}
}
@@ -204,7 +206,6 @@ static void init_io_counter(struct io_counter *counterp, int major, int first_mi
}
}
-
u32 find_matching_tag_1_keys(int major, int minor) {
u32 key = 0;
struct stage_data *curr_data = bpf_map_lookup_elem(&tag_res, &key);
@@ -705,6 +706,7 @@ u32 find_matching_wbt_5_keys(int major, int minor) {
return MAP_SIZE + 1;
}
+// start rq_driver
SEC("kprobe/blk_mq_start_request")
int kprobe_blk_mq_start_request(struct pt_regs *regs)
{
@@ -742,14 +744,12 @@ int kprobe_blk_mq_start_request(struct pt_regs *regs)
if (err)
return 0;
- u64 curr_start_range = zero.start_time / THRESHOLD / MAX_BUCKETS;
- u64 update_bucket = curr_start_range % MAX_BUCKETS;
+ u64 curr_start_range = zero.start_time / THRESHOLD;
struct update_params params = {
.major = major,
.first_minor = first_minor,
.cmd_flags = cmd_flags,
- .update_bucket = update_bucket,
.curr_start_range = curr_start_range,
};
@@ -764,20 +764,28 @@ int kprobe_blk_mq_start_request(struct pt_regs *regs)
.major = major,
.first_minor = first_minor,
.io_type = "",
- .bucket = {
- [0] = {.start_range = 0, .io_count = 0},
- [1] = {.start_range = 0, .io_count = 0},
- },
};
- update_new_data_in_start(&new_data, &params);
+ blk_fill_rwbs(new_data.io_type, cmd_flags);
bpf_map_update_elem(&blk_res, &key, &new_data, 0);
} else {
update_curr_data_in_start(curr_data, &params);
}
+ struct time_range_io_count *curr_data_time_range;
+ curr_data_time_range = bpf_map_lookup_elem(&blk_res_2, &curr_start_range);
+ if (curr_data_time_range == NULL) {
+ struct time_range_io_count new_data = { .count = {0} };
+ if (key < MAP_SIZE) {
+ new_data.count[key] = 1;
+ }
+ bpf_map_update_elem(&blk_res_2, &curr_start_range, &new_data, 0);
+ } else {
+ if (key < MAP_SIZE) {
+ __sync_fetch_and_add(&curr_data_time_range->count[key], 1);
+ }
+ }
+
return 0;
}
+// finish rq_driver
SEC("kprobe/blk_mq_free_request")
int kprobe_blk_mq_free_request(struct pt_regs *regs)
{
@@ -811,15 +819,13 @@ int kprobe_blk_mq_free_request(struct pt_regs *regs)
return 0;
}
- u64 duration = bpf_ktime_get_ns() - counterp->start_time;
- u64 curr_start_range = counterp->start_time / THRESHOLD / MAX_BUCKETS;
- u64 update_bucket = curr_start_range % MAX_BUCKETS;
+ u64 duration = bpf_ktime_get_ns() - counterp->start_time;
+ u64 curr_start_range = counterp->start_time / THRESHOLD;
struct update_params params = {
.major = major,
.first_minor = first_minor,
.cmd_flags = cmd_flags,
- .update_bucket = update_bucket,
.curr_start_range = curr_start_range,
};
@@ -834,12 +840,8 @@ int kprobe_blk_mq_free_request(struct pt_regs *regs)
.major = major,
.first_minor = first_minor,
.io_type = "",
- .bucket = {
- [0] = {.start_range = 0, .io_count = 0},
- [1] = {.start_range = 0, .io_count = 0},
- },
};
- update_new_data_in_finish(&new_data, &params);
+ blk_fill_rwbs(new_data.io_type, cmd_flags);
bpf_map_update_elem(&blk_res, &key, &new_data, 0);
} else if (curr_data == NULL) {
struct stage_data new_data = {
@@ -850,28 +852,30 @@ int kprobe_blk_mq_free_request(struct pt_regs *regs)
.major = major,
.first_minor = first_minor,
.io_type = "",
- .bucket = {
- [0] = {.start_range = 0, .io_count = 0},
- [1] = {.start_range = 0, .io_count = 0},
- },
};
- update_new_data_in_finish(&new_data, &params);
+ blk_fill_rwbs(new_data.io_type, cmd_flags);
bpf_map_update_elem(&blk_res, &key, &new_data, 0);
} else {
- if (curr_data->bucket[update_bucket].start_range == curr_start_range) {
- curr_data->bucket[update_bucket].io_count = (curr_data->bucket[update_bucket].io_count > 1) ? curr_data->bucket[update_bucket].io_count - 1 : 0;
- } else {
- curr_data->bucket[MAX_BUCKETS].io_count = (curr_data->bucket[MAX_BUCKETS].io_count > 1) ? curr_data->bucket[MAX_BUCKETS].io_count - 1 : 0;
-
- }
curr_data->duration += duration;
update_curr_data_in_finish(curr_data, &params, &duration);
}
+ struct time_range_io_count *curr_data_time_range;
+ curr_data_time_range = bpf_map_lookup_elem(&blk_res_2, &curr_start_range);
+ if (curr_data_time_range == NULL) {
+ struct time_range_io_count new_data = { .count = {0} };
+ bpf_map_update_elem(&blk_res_2, &curr_start_range, &new_data, 0);
+ } else {
+ if (key < MAP_SIZE && curr_data_time_range->count[key] > 0) {
+ __sync_fetch_and_add(&curr_data_time_range->count[key], -1);
+ }
+ }
+
bpf_map_delete_elem(&blk_map, &rq);
return 0;
}
+// start bio
SEC("kprobe/blk_mq_make_request")
int kprobe_blk_mq_make_request(struct pt_regs *regs)
{
@@ -909,20 +913,18 @@ int kprobe_blk_mq_make_request(struct pt_regs *regs)
if (err && err != -EEXIST)
return 0;
- u64 curr_start_range = zero.start_time / THRESHOLD / MAX_BUCKETS;
- u64 update_bucket = curr_start_range % MAX_BUCKETS;
+ u64 curr_start_range = zero.start_time / THRESHOLD;
struct update_params params = {
.major = major,
.first_minor = first_minor,
.cmd_flags = cmd_flags,
- .update_bucket = update_bucket,
.curr_start_range = curr_start_range,
};
struct stage_data *curr_data;
curr_data = bpf_map_lookup_elem(&bio_res, &key);
- if (curr_data == NULL) {
+ if (curr_data == NULL) {
struct stage_data new_data = {
.start_count = 1,
.finish_count = 0,
@@ -931,20 +933,28 @@ int kprobe_blk_mq_make_request(struct pt_regs *regs)
.major = major,
.first_minor = first_minor,
.io_type = "",
- .bucket = {
- [0] = {.start_range = 0, .io_count = 0},
- [1] = {.start_range = 0, .io_count = 0},
- },
};
- update_new_data_in_start(&new_data, &params);
+ blk_fill_rwbs(new_data.io_type, cmd_flags);
bpf_map_update_elem(&bio_res, &key, &new_data, 0);
- } else {
+ } else {
update_curr_data_in_start(curr_data, &params);
}
+ struct time_range_io_count *curr_data_time_range;
+ curr_data_time_range = bpf_map_lookup_elem(&bio_res_2, &curr_start_range);
+ if (curr_data_time_range == NULL) {
+ struct time_range_io_count new_data = { .count = {0} };
+ if (key < MAP_SIZE) {
+ new_data.count[key] = 1;
+ }
+ bpf_map_update_elem(&bio_res_2, &curr_start_range, &new_data, 0);
+ } else {
+ if (key < MAP_SIZE) {
+ __sync_fetch_and_add(&curr_data_time_range->count[key], 1);
+ }
+ }
+
return 0;
}
+// finish bio
SEC("kprobe/bio_endio")
int kprobe_bio_endio(struct pt_regs *regs)
{
@@ -982,20 +992,18 @@ int kprobe_bio_endio(struct pt_regs *regs)
delete_map = &bio_map;
u64 duration = bpf_ktime_get_ns() - counterp->start_time;
- u64 curr_start_range = counterp->start_time / THRESHOLD / MAX_BUCKETS;
- u64 update_bucket = curr_start_range % MAX_BUCKETS;
+ u64 curr_start_range = counterp->start_time / THRESHOLD;
struct update_params params = {
.major = major,
.first_minor = first_minor,
.cmd_flags = cmd_flags,
- .update_bucket = update_bucket,
.curr_start_range = curr_start_range,
};
struct stage_data *curr_data;
curr_data = bpf_map_lookup_elem(&bio_res, &key);
- if (curr_data == NULL && duration > DURATION_THRESHOLD) {
+ if (curr_data == NULL && duration > DURATION_THRESHOLD) {
struct stage_data new_data = {
.start_count = 1,
.finish_count = 1,
@@ -1004,14 +1012,10 @@ int kprobe_bio_endio(struct pt_regs *regs)
.major = major,
.first_minor = first_minor,
.io_type = "",
- .bucket = {
- [0] = {.start_range = 0, .io_count = 0},
- [1] = {.start_range = 0, .io_count = 0},
- },
};
- update_new_data_in_finish(&new_data, &params);
+ blk_fill_rwbs(new_data.io_type, cmd_flags);
bpf_map_update_elem(&bio_res, &key, &new_data, 0);
- } else if (curr_data == NULL) {
+ } else if (curr_data == NULL) {
struct stage_data new_data = {
.start_count = 1,
.finish_count = 1,
@@ -1020,28 +1024,30 @@ int kprobe_bio_endio(struct pt_regs *regs)
.major = major,
.first_minor = first_minor,
.io_type = "",
- .bucket = {
- [0] = {.start_range = 0, .io_count = 0},
- [1] = {.start_range = 0, .io_count = 0},
- },
};
- update_new_data_in_finish(&new_data, &params);
+ blk_fill_rwbs(new_data.io_type, cmd_flags);
bpf_map_update_elem(&bio_res, &key, &new_data, 0);
} else {
- if (curr_data->bucket[update_bucket].start_range == curr_start_range) {
- curr_data->bucket[update_bucket].io_count = (curr_data->bucket[update_bucket].io_count > 1) ? curr_data->bucket[update_bucket].io_count - 1 : 0;
- } else {
- curr_data->bucket[MAX_BUCKETS].io_count = (curr_data->bucket[MAX_BUCKETS].io_count > 1) ? curr_data->bucket[MAX_BUCKETS].io_count - 1 : 0;
-
- }
curr_data->duration += duration;
update_curr_data_in_finish(curr_data, &params, &duration);
}
+ struct time_range_io_count *curr_data_time_range;
+ curr_data_time_range = bpf_map_lookup_elem(&bio_res_2, &curr_start_range);
+ if (curr_data_time_range == NULL) {
+ struct time_range_io_count new_data = { .count = {0} };
+ bpf_map_update_elem(&bio_res_2, &curr_start_range, &new_data, 0);
+ } else {
+ if (key < MAP_SIZE && curr_data_time_range->count[key] > 0) {
+ __sync_fetch_and_add(&curr_data_time_range->count[key], -1);
+ }
+ }
+
bpf_map_delete_elem(delete_map, &bio);
return 0;
}
+// start wbt
SEC("kprobe/wbt_wait")
int kprobe_wbt_wait(struct pt_regs *regs)
{
@@ -1082,14 +1088,12 @@ int kprobe_wbt_wait(struct pt_regs *regs)
if (err)
return 0;
- u64 curr_start_range = zero.start_time / THRESHOLD / MAX_BUCKETS;
- u64 update_bucket = curr_start_range % MAX_BUCKETS;
+ u64 curr_start_range = zero.start_time / THRESHOLD;
struct update_params params = {
.major = major,
.first_minor = first_minor,
.cmd_flags = cmd_flags,
- .update_bucket = update_bucket,
.curr_start_range = curr_start_range,
};
@@ -1104,20 +1108,28 @@ int kprobe_wbt_wait(struct pt_regs *regs)
.major = major,
.first_minor = first_minor,
.io_type = "",
- .bucket = {
- [0] = {.start_range = 0, .io_count = 0},
- [1] = {.start_range = 0, .io_count = 0},
- },
};
- update_new_data_in_start(&new_data, &params);
+ blk_fill_rwbs(new_data.io_type, cmd_flags);
bpf_map_update_elem(&wbt_res, &key, &new_data, 0);
} else {
update_curr_data_in_start(curr_data, &params);
}
+ struct time_range_io_count *curr_data_time_range;
+ curr_data_time_range = bpf_map_lookup_elem(&wbt_res_2, &curr_start_range);
+ if (curr_data_time_range == NULL) {
+ struct time_range_io_count new_data = { .count = {0} };
+ if (key < MAP_SIZE) {
+ new_data.count[key] = 1;
+ }
+ bpf_map_update_elem(&wbt_res_2, &curr_start_range, &new_data, 0);
+ } else {
+ if (key < MAP_SIZE) {
+ __sync_fetch_and_add(&curr_data_time_range->count[key], 1);
+ }
+ }
+
return 0;
}
+// finish wbt
SEC("kretprobe/wbt_wait")
int kretprobe_wbt_wait(struct pt_regs *regs)
{
@@ -1159,14 +1171,12 @@ int kretprobe_wbt_wait(struct pt_regs *regs)
return 0;
u64 duration = bpf_ktime_get_ns() - counterp->start_time;
- u64 curr_start_range = counterp->start_time / THRESHOLD / MAX_BUCKETS;
- u64 update_bucket = curr_start_range % MAX_BUCKETS;
+ u64 curr_start_range = counterp->start_time / THRESHOLD;
struct update_params params = {
.major = major,
.first_minor = first_minor,
.cmd_flags = cmd_flags,
- .update_bucket = update_bucket,
.curr_start_range = curr_start_range,
};
@@ -1181,12 +1191,8 @@ int kretprobe_wbt_wait(struct pt_regs *regs)
.major = major,
.first_minor = first_minor,
.io_type = "",
- .bucket = {
- [0] = {.start_range = 0, .io_count = 0},
- [1] = {.start_range = 0, .io_count = 0},
- },
};
- update_new_data_in_finish(&new_data, &params);
+ blk_fill_rwbs(new_data.io_type, cmd_flags);
bpf_map_update_elem(&wbt_res, &key, &new_data, 0);
} else if (curr_data == NULL) {
struct stage_data new_data = {
@@ -1197,29 +1203,31 @@ int kretprobe_wbt_wait(struct pt_regs *regs)
.io_type = "",
.major = major,
.first_minor = first_minor,
- .bucket = {
- [0] = {.start_range = 0, .io_count = 0},
- [1] = {.start_range = 0, .io_count = 0},
- },
};
- update_new_data_in_finish(&new_data, &params);
+ blk_fill_rwbs(new_data.io_type, cmd_flags);
bpf_map_update_elem(&wbt_res, &key, &new_data, 0);
} else {
- if (curr_data->bucket[update_bucket].start_range == curr_start_range) {
- curr_data->bucket[update_bucket].io_count = (curr_data->bucket[update_bucket].io_count > 1) ? curr_data->bucket[update_bucket].io_count - 1 : 0;
- } else {
- curr_data->bucket[MAX_BUCKETS].io_count = (curr_data->bucket[MAX_BUCKETS].io_count > 1) ? curr_data->bucket[MAX_BUCKETS].io_count - 1 : 0;
-
- }
curr_data->duration += duration;
update_curr_data_in_finish(curr_data, &params, &duration);
}
+ struct time_range_io_count *curr_data_time_range;
+ curr_data_time_range = bpf_map_lookup_elem(&wbt_res_2, &curr_start_range);
+ if (curr_data_time_range == NULL) {
+ struct time_range_io_count new_data = { .count = {0} };
+ bpf_map_update_elem(&wbt_res_2, &curr_start_range, &new_data, 0);
+ } else {
+ if (key < MAP_SIZE && curr_data_time_range->count[key] > 0) {
+ __sync_fetch_and_add(&curr_data_time_range->count[key], -1);
+ }
+ }
+
bpf_map_delete_elem(&wbt_map, &wbtkey);
bpf_map_delete_elem(&wbt_args, &wbtkey);
return 0;
}
+// start get_tag
SEC("kprobe/blk_mq_get_tag")
int kprobe_blk_mq_get_tag(struct pt_regs *regs)
{
@@ -1262,14 +1270,12 @@ int kprobe_blk_mq_get_tag(struct pt_regs *regs)
if (err)
return 0;
- u64 curr_start_range = zero.start_time / THRESHOLD / MAX_BUCKETS;
- u64 update_bucket = curr_start_range % MAX_BUCKETS;
+ u64 curr_start_range = zero.start_time / THRESHOLD;
struct update_params params = {
.major = major,
.first_minor = first_minor,
.cmd_flags = cmd_flags,
- .update_bucket = update_bucket,
.curr_start_range = curr_start_range,
};
@@ -1284,20 +1290,28 @@ int kprobe_blk_mq_get_tag(struct pt_regs *regs)
.major = major,
.first_minor = first_minor,
.io_type = "",
- .bucket = {
- [0] = {.start_range = 0, .io_count = 0},
- [1] = {.start_range = 0, .io_count = 0},
- },
};
- update_new_data_in_start(&new_data, &params);
+ blk_fill_rwbs(new_data.io_type, cmd_flags);
bpf_map_update_elem(&tag_res, &key, &new_data, 0);
} else {
update_curr_data_in_start(curr_data, &params);
}
+ struct time_range_io_count *curr_data_time_range;
+ curr_data_time_range = bpf_map_lookup_elem(&tag_res_2, &curr_start_range);
+ if (curr_data_time_range == NULL) {
+ struct time_range_io_count new_data = { .count = {0} };
+ if (key < MAP_SIZE) {
+ new_data.count[key] = 1;
+ }
+ bpf_map_update_elem(&tag_res_2, &curr_start_range, &new_data, 0);
+ } else {
+ if (key < MAP_SIZE) {
+ __sync_fetch_and_add(&curr_data_time_range->count[key], 1);
+ }
+ }
+
return 0;
}
+// finish get_tag
SEC("kretprobe/blk_mq_get_tag")
int kretprobe_blk_mq_get_tag(struct pt_regs *regs)
{
@@ -1343,14 +1357,12 @@ int kretprobe_blk_mq_get_tag(struct pt_regs *regs)
return 0;
u64 duration = bpf_ktime_get_ns() - counterp->start_time;
- u64 curr_start_range = counterp->start_time / THRESHOLD / MAX_BUCKETS;
- u64 update_bucket = curr_start_range % MAX_BUCKETS;
+ u64 curr_start_range = counterp->start_time / THRESHOLD;
struct update_params params = {
.major = major,
.first_minor = first_minor,
.cmd_flags = cmd_flags,
- .update_bucket = update_bucket,
.curr_start_range = curr_start_range,
};
@@ -1365,12 +1377,8 @@ int kretprobe_blk_mq_get_tag(struct pt_regs *regs)
.major = major,
.first_minor = first_minor,
.io_type = "",
- .bucket = {
- [0] = {.start_range = 0, .io_count = 0},
- [1] = {.start_range = 0, .io_count = 0},
- },
};
- update_new_data_in_finish(&new_data, &params);
+ blk_fill_rwbs(new_data.io_type, cmd_flags);
bpf_map_update_elem(&tag_res, &key, &new_data, 0);
} else if (curr_data == NULL) {
struct stage_data new_data = {
@@ -1381,23 +1389,25 @@ int kretprobe_blk_mq_get_tag(struct pt_regs *regs)
.major = major,
.first_minor = first_minor,
.io_type = "",
- .bucket = {
- [0] = {.start_range = 0, .io_count = 0},
- [1] = {.start_range = 0, .io_count = 0},
- },
};
- update_new_data_in_finish(&new_data, &params);
+ blk_fill_rwbs(new_data.io_type, cmd_flags);
bpf_map_update_elem(&tag_res, &key, &new_data, 0);
} else {
- if (curr_data->bucket[update_bucket].start_range == curr_start_range) {
- curr_data->bucket[update_bucket].io_count = (curr_data->bucket[update_bucket].io_count > 1) ? curr_data->bucket[update_bucket].io_count - 1 : 0;
- } else {
- curr_data->bucket[MAX_BUCKETS].io_count = (curr_data->bucket[MAX_BUCKETS].io_count > 1) ? curr_data->bucket[MAX_BUCKETS].io_count - 1 : 0;
-
- }
curr_data->duration += duration;
update_curr_data_in_finish(curr_data, &params, &duration);
}
+
+ struct time_range_io_count *curr_data_time_range;
+ curr_data_time_range = bpf_map_lookup_elem(&tag_res_2, &curr_start_range);
+ if (curr_data_time_range == NULL) {
+ struct time_range_io_count new_data = { .count = {0} };
+ bpf_map_update_elem(&tag_res_2, &curr_start_range, &new_data, 0);
+ } else {
+ if (key < MAP_SIZE && curr_data_time_range->count[key] > 0) {
+ __sync_fetch_and_add(&curr_data_time_range->count[key], -1);
+ }
+ }
+
bpf_map_delete_elem(&tag_map, &tagkey);
bpf_map_delete_elem(&tag_args, &tagkey);
return 0;
diff --git a/src/c/ebpf_collector/ebpf_collector.c b/src/c/ebpf_collector/ebpf_collector.c
index a949ae8..6e981da 100644
--- a/src/c/ebpf_collector/ebpf_collector.c
+++ b/src/c/ebpf_collector/ebpf_collector.c
@@ -30,6 +30,10 @@
#define WBT_RES (map_fd[5])
#define TAG_MAP (map_fd[7])
#define TAG_RES (map_fd[8])
+#define BLK_RES_2 (map_fd[10])
+#define BIO_RES_2 (map_fd[11])
+#define WBT_RES_2 (map_fd[12])
+#define TAG_RES_2 (map_fd[13])
#define BPF_FILE "/usr/lib/ebpf_collector.bpf.o"
typedef struct {
@@ -113,16 +117,40 @@ char* find_device_name(dev_t dev) {
return device_name;
}
-static int print_map_res(struct bpf_map *map_res, char *stage, int *map_size)
+static int print_map_res(struct bpf_map *map_res, struct bpf_map *map_res_2, char *stage, int *map_size)
{
+ int err;
struct stage_data counter;
- int key = 0;
+ struct time_range_io_count time_count;
+ int key = 0;
+ int io_dump[MAP_SIZE] = {0};
+ u32 io_dump_key = 0, io_dump_next_key = 0;
struct sysinfo info;
- sysinfo(&info);
+ sysinfo(&info);
+
+ u32 delete_keys[MAX_IO_TIME];
+ int delete_cnt = 0;
+ while (bpf_map_get_next_key(map_res_2, &io_dump_key, &io_dump_next_key) == 0) {
+ err = bpf_map_lookup_elem(map_res_2, &io_dump_next_key, &time_count);
+ if (err < 0) {
+ fprintf(stderr, "failed to lookup %s io dump: %d\n", stage, err);
+ io_dump_key = io_dump_next_key;
+ continue;
+ }
+ io_dump_key = io_dump_next_key;
+ if ((info.uptime - io_dump_key) > 2) {
+ int isempty = 1;
+ for (key = 0; key < map_size; key++){
+ if (time_count.count[key] > 0) {
+ io_dump[key] += time_count.count[key];
+ isempty = 0;
+ }
+ }
+ if ((isempty || (info.uptime - io_dump_key) > IO_DUMP_THRESHOLD) && delete_cnt < MAX_IO_TIME) {
+ delete_keys[delete_cnt++] = io_dump_key;
+ }
+ }
+ }
+ for (int i = 0; i < delete_cnt; i++) {
+ bpf_map_delete_elem(map_res_2, &delete_keys[i]);
+ }
for (key = 0; key < map_size; key++) {
- int err;
err = bpf_map_lookup_elem(map_res, &key, &counter);
if (err < 0) {
fprintf(stderr, "failed to lookup %s map_res: %d\n", stage, err);
@@ -141,11 +169,11 @@ static int print_map_res(struct bpf_map *map_res, char *stage, int *map_size)
dev_t dev = makedev(major, first_minor);
char *device_name = find_device_name(dev);
if (device_name && io_type) {
- printf("%-7s %10llu %10llu %u %c %s\n",
+ printf("%-7s %10llu %10llu %d %c %s\n",
stage,
counter.finish_count,
counter.duration,
- counter.bucket[MAX_BUCKETS].io_count,
+ io_dump[key],
io_type,
device_name
);
@@ -158,8 +186,8 @@ static int print_map_res(struct bpf_map *map_res, char *stage, int *map_size)
int init_map(int *map_fd, const char *map_name, int *map_size, DeviceInfo *devices) {
struct stage_data init_data = {0};
+
memset(init_data.io_type, 0, sizeof(init_data.io_type));
- memset(init_data.bucket, 0, sizeof(init_data.bucket));
for (int i = 0; i < map_size; i++) {
init_data.major = devices[i].major;
@@ -246,19 +274,19 @@ int main(int argc, char **argv) {
sleep(1);
- err = print_map_res(BLK_RES, "rq_driver", device_count);
+ err = print_map_res(BLK_RES, BLK_RES_2, "rq_driver", device_count);
if (err)
break;
- err = print_map_res(BIO_RES, "bio", device_count);
+ err = print_map_res(BIO_RES, BIO_RES_2, "bio", device_count);
if (err)
break;
- err = print_map_res(TAG_RES, "gettag", device_count);
+ err = print_map_res(TAG_RES, TAG_RES_2, "gettag", device_count);
if (err)
break;
- err = print_map_res(WBT_RES, "wbt", device_count);
+ err = print_map_res(WBT_RES, WBT_RES_2, "wbt", device_count);
if (err)
break;
@@ -268,3 +296,4 @@ int main(int argc, char **argv) {
return -err;
}
+
diff --git a/src/c/ebpf_collector/ebpf_collector.h b/src/c/ebpf_collector/ebpf_collector.h
index dca04d8..fcebc93 100644
--- a/src/c/ebpf_collector/ebpf_collector.h
+++ b/src/c/ebpf_collector/ebpf_collector.h
@@ -10,7 +10,8 @@
typedef long long unsigned int u64;
typedef unsigned int u32;
-#define MAX_BUCKETS 1
+#define MAX_IO_TIME 130
+#define IO_DUMP_THRESHOLD 120
#define THRESHOLD 1000000000
#define DURATION_THRESHOLD 500000000
@@ -29,7 +30,7 @@ typedef unsigned int u32;
#define REQ_OP_DISCARD 3
#define REQ_OP_SECURE_ERASE 5
#define REQ_OP_WRITE_SAME 7
-#define MAP_SIZE 128
+#define MAP_SIZE 15
enum stage_type {
BIO=0,
@@ -42,11 +43,6 @@ enum stage_type {
MAX_STAGE_TYPE,
};
-struct time_bucket {
- u64 start_range;
- u32 io_count;
-};
-
struct stage_data {
u64 start_count;
u64 finish_count;
@@ -55,7 +51,6 @@ struct stage_data {
int major;
int first_minor;
char io_type[RWBS_LEN];
- struct time_bucket bucket[MAX_BUCKETS+1];
};
struct io_counter {
@@ -70,8 +65,13 @@ struct update_params {
int major;
int first_minor;
unsigned int cmd_flags;
- u64 update_bucket;
u64 curr_start_range;
};
+struct time_range_io_count
+{
+ u32 count[MAP_SIZE];
+};
+
#endif /* __EBPFCOLLECTOR_H */
+
--
2.33.0