sysSentry/ebpf-adpat-to-the-kylin-platform.patch
2024-12-20 11:19:14 +08:00

811 lines
24 KiB
Diff

From 42aa4db73e1005bb981625f1142bd9082fd57059 Mon Sep 17 00:00:00 2001
From: zhuofeng <zhuofeng2@huawei.com>
Date: Wed, 18 Dec 2024 11:04:56 +0800
Subject: [PATCH] ebpf adapt to the kylin platform
---
src/c/ebpf_collector/ebpf_collector.bpf.c | 506 +++++++++++++++++++++-
src/c/ebpf_collector/ebpf_collector.c | 115 ++++-
src/c/ebpf_collector/ebpf_collector.h | 11 +-
3 files changed, 607 insertions(+), 25 deletions(-)
diff --git a/src/c/ebpf_collector/ebpf_collector.bpf.c b/src/c/ebpf_collector/ebpf_collector.bpf.c
index a7ea51b..e561d0d 100644
--- a/src/c/ebpf_collector/ebpf_collector.bpf.c
+++ b/src/c/ebpf_collector/ebpf_collector.bpf.c
@@ -20,6 +20,9 @@
#include "bpf_helpers.h"
#include "ebpf_collector.h"
+#define VERSION_KY_V2401 1
+#define VERSION_KY_V2101 2
+
#define _(P) ({typeof(P) val; bpf_probe_read(&val, sizeof(val), &P); val;})
struct bpf_map_def SEC("maps") blk_map = {
@@ -120,18 +123,15 @@ struct bpf_map_def SEC("maps") tag_res_2 = {
.max_entries = MAX_IO_TIME,
};
-struct blk_mq_alloc_data {
- /* input parameter */
- struct request_queue *q;
- blk_mq_req_flags_t flags;
- unsigned int shallow_depth;
-
- /* input & output parameter */
- struct blk_mq_ctx *ctx;
- struct blk_mq_hw_ctx *hctx;
+struct bpf_map_def SEC("maps") version_res = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(int),
+ .max_entries = MAX_IO_TIME,
};
-struct request_kylin {
+// 麒麟v2101平台
+struct request_kylin_v2101 {
struct request_queue *q;
struct blk_mq_ctx *mq_ctx;
@@ -249,7 +249,7 @@ struct request_kylin {
void *end_io_data;
/* for bidi */
- struct request_kylin *next_rq;
+ struct request_kylin_v2101 *next_rq;
#ifdef CONFIG_BLK_CGROUP
struct request_list *rl; /* rl this rq is alloced from */
@@ -258,6 +258,415 @@ struct request_kylin {
KABI_RESERVE(2);
};
+struct blk_mq_alloc_data {
+ /* input parameter */
+ struct request_queue *q;
+ blk_mq_req_flags_t flags;
+ unsigned int shallow_depth;
+
+ /* input & output parameter */
+ struct blk_mq_ctx *ctx;
+ struct blk_mq_hw_ctx *hctx;
+};
+
+// 麒麟v2401平台
+struct request_kylin_v2401 {
+ struct request_queue *q;
+ struct blk_mq_ctx *mq_ctx;
+ struct blk_mq_hw_ctx *mq_hctx;
+
+ unsigned int cmd_flags; /* op and common flags */
+ req_flags_t rq_flags;
+
+ int internal_tag;
+
+ /* the following two fields are internal, NEVER access directly */
+ unsigned int __data_len; /* total data len */
+ int tag;
+ sector_t __sector; /* sector cursor */
+
+ struct bio *bio;
+ struct bio *biotail;
+
+ struct list_head queuelist;
+
+ /*
+ * The hash is used inside the scheduler, and killed once the
+ * request reaches the dispatch list. The ipi_list is only used
+ * to queue the request for softirq completion, which is long
+ * after the request has been unhashed (and even removed from
+ * the dispatch list).
+ */
+ union {
+ struct hlist_node hash; /* merge hash */
+ struct list_head ipi_list;
+ };
+
+ struct hlist_node front_hash; /* front merge hash */
+
+ /*
+ * The rb_node is only used inside the io scheduler, requests
+ * are pruned when moved to the dispatch queue. So let the
+ * completion_data share space with the rb_node.
+ */
+ union {
+ struct rb_node rb_node; /* sort/lookup */
+ struct bio_vec special_vec;
+ void *completion_data;
+ int error_count; /* for legacy drivers, don't use */
+ };
+
+ /*
+ * Three pointers are available for the IO schedulers, if they need
+ * more they have to dynamically allocate it. Flush requests are
+ * never put on the IO scheduler. So let the flush fields share
+ * space with the elevator data.
+ */
+ union {
+ struct {
+ struct io_cq *icq;
+ void *priv[2];
+ } elv;
+
+ struct {
+ unsigned int seq;
+ struct list_head list;
+ rq_end_io_fn *saved_end_io;
+ } flush;
+ };
+
+ struct gendisk *rq_disk;
+ struct hd_struct *part;
+#ifdef CONFIG_BLK_RQ_ALLOC_TIME
+ /* Time that the first bio started allocating this request. */
+ u64 alloc_time_ns;
+#endif
+ /* Time that this request was allocated for this IO. */
+ u64 start_time_ns;
+ /* Time that I/O was submitted to the device. */
+ u64 io_start_time_ns;
+
+#ifdef CONFIG_BLK_WBT
+ unsigned short wbt_flags;
+#endif
+#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
+ unsigned short throtl_size;
+#endif
+
+ /*
+ * Number of scatter-gather DMA addr+len pairs after
+ * physical address coalescing is performed.
+ */
+ unsigned short nr_phys_segments;
+
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+ unsigned short nr_integrity_segments;
+#endif
+
+ unsigned short write_hint;
+ unsigned short ioprio;
+
+ void *special; /* opaque pointer available for LLD use */
+
+ unsigned int extra_len; /* length of alignment and padding */
+
+ enum mq_rq_state state;
+ refcount_t ref;
+
+ unsigned int timeout;
+
+ /* access through blk_rq_set_deadline, blk_rq_deadline */
+ unsigned long __deadline;
+
+ union {
+ struct __call_single_data csd;
+ u64 fifo_time;
+ };
+
+ /*
+ * completion callback.
+ */
+ rq_end_io_fn *end_io;
+ void *end_io_data;
+
+ /* for bidi */
+ struct request_kylin_v2401 *next_rq;
+ KABI_RESERVE(1);
+ KABI_RESERVE(2);
+};
+
+struct request_queue_kylin_v2401 {
+ /*
+ * Together with queue_head for cacheline sharing
+ */
+ struct list_head queue_head;
+ struct request *last_merge;
+ struct elevator_queue *elevator;
+
+ struct blk_queue_stats *stats;
+ struct rq_qos *rq_qos;
+
+ make_request_fn *make_request_fn;
+ poll_q_fn *poll_fn;
+ dma_drain_needed_fn *dma_drain_needed;
+
+ const struct blk_mq_ops *mq_ops;
+
+ /* sw queues */
+ struct blk_mq_ctx __percpu *queue_ctx;
+ unsigned int nr_queues;
+
+ unsigned int queue_depth;
+
+ /* hw dispatch queues */
+ struct blk_mq_hw_ctx **queue_hw_ctx;
+ unsigned int nr_hw_queues;
+
+ struct backing_dev_info_kylin_v2401 *backing_dev_info;
+
+ /*
+ * The queue owner gets to use this for whatever they like.
+ * ll_rw_blk doesn't touch it.
+ */
+ void *queuedata;
+
+ /*
+ * various queue flags, see QUEUE_* below
+ */
+ unsigned long queue_flags;
+ /*
+ * Number of contexts that have called blk_set_pm_only(). If this
+ * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
+ * processed.
+ */
+ atomic_t pm_only;
+
+ /*
+ * ida allocated id for this queue. Used to index queues from
+ * ioctx.
+ */
+ int id;
+
+ /*
+ * queue needs bounce pages for pages above this limit
+ */
+ gfp_t bounce_gfp;
+
+ /*
+ * protects queue structures from reentrancy. ->__queue_lock should
+ * _never_ be used directly, it is queue private. always use
+ * ->queue_lock.
+ */
+ spinlock_t __queue_lock;
+ spinlock_t *queue_lock;
+
+ /*
+ * queue kobject
+ */
+ struct kobject kobj;
+
+ /*
+ * mq queue kobject
+ */
+ struct kobject *mq_kobj;
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ struct blk_integrity integrity;
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
+#ifdef CONFIG_PM
+ struct device *dev;
+ int rpm_status;
+ unsigned int nr_pending;
+#endif
+
+ /*
+ * queue settings
+ */
+ unsigned long nr_requests; /* Max # of requests */
+
+ unsigned int dma_drain_size;
+ void *dma_drain_buffer;
+ unsigned int dma_pad_mask;
+ unsigned int dma_alignment;
+
+ unsigned int rq_timeout;
+ int poll_nsec;
+
+ struct blk_stat_callback *poll_cb;
+ struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS];
+
+ struct timer_list timeout;
+ struct work_struct timeout_work;
+
+ atomic_t nr_active_requests_shared_sbitmap;
+
+ struct list_head icq_list;
+#ifdef CONFIG_BLK_CGROUP
+ DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
+ struct blkcg_gq *root_blkg;
+ struct list_head blkg_list;
+#endif
+
+ struct queue_limits limits;
+
+ unsigned int required_elevator_features;
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ /*
+ * Zoned block device information for request dispatch control.
+ * nr_zones is the total number of zones of the device. This is always
+ * 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones
+ * bits which indicates if a zone is conventional (bit clear) or
+ * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones
+ * bits which indicates if a zone is write locked, that is, if a write
+ * request targeting the zone was dispatched. All three fields are
+ * initialized by the low level device driver (e.g. scsi/sd.c).
+ * Stacking drivers (device mappers) may or may not initialize
+ * these fields.
+ *
+ * Reads of this information must be protected with blk_queue_enter() /
+ * blk_queue_exit(). Modifying this information is only allowed while
+ * no requests are being processed. See also blk_mq_freeze_queue() and
+ * blk_mq_unfreeze_queue().
+ */
+ unsigned int nr_zones;
+ unsigned long *seq_zones_bitmap;
+ unsigned long *seq_zones_wlock;
+#endif /* CONFIG_BLK_DEV_ZONED */
+
+ /*
+ * sg stuff
+ */
+ unsigned int sg_timeout;
+ unsigned int sg_reserved_size;
+ int node;
+#ifdef CONFIG_BLK_DEV_IO_TRACE
+ struct blk_trace __rcu *blk_trace;
+ struct mutex blk_trace_mutex;
+#endif
+ /*
+ * for flush operations
+ */
+ struct blk_flush_queue *fq;
+
+ struct list_head requeue_list;
+ spinlock_t requeue_lock;
+ struct delayed_work requeue_work;
+
+ struct mutex sysfs_lock;
+
+ /*
+ * for reusing dead hctx instance in case of updating
+ * nr_hw_queues
+ */
+ struct list_head unused_hctx_list;
+ spinlock_t unused_hctx_lock;
+
+ int mq_freeze_depth;
+
+#if defined(CONFIG_BLK_DEV_BSG)
+ struct bsg_class_device bsg_dev;
+#endif
+
+#ifdef CONFIG_BLK_DEV_THROTTLING
+ /* Throttle data */
+ struct throtl_data *td;
+#endif
+ struct rcu_head rcu_head;
+ wait_queue_head_t mq_freeze_wq;
+ /*
+ * Protect concurrent access to q_usage_counter by
+ * percpu_ref_kill() and percpu_ref_reinit().
+ */
+ struct mutex mq_freeze_lock;
+ struct percpu_ref q_usage_counter;
+ struct list_head all_q_node;
+
+ struct blk_mq_tag_set *tag_set;
+ struct list_head tag_set_list;
+ struct bio_set bio_split;
+
+#ifdef CONFIG_BLK_DEBUG_FS
+ struct dentry *debugfs_dir;
+ struct dentry *sched_debugfs_dir;
+#endif
+
+ bool mq_sysfs_init_done;
+
+ size_t cmd_size;
+ void *rq_alloc_data;
+
+ struct work_struct release_work;
+
+#ifdef CONFIG_BLK_BIO_DISPATCH_ASYNC
+ /* used when QUEUE_FLAG_DISPATCH_ASYNC is set */
+ struct cpumask dispatch_async_cpus;
+ int __percpu *last_dispatch_cpu;
+#endif
+
+#define BLK_MAX_WRITE_HINTS 5
+ u64 write_hints[BLK_MAX_WRITE_HINTS];
+
+ KABI_RESERVE(1);
+ KABI_RESERVE(2);
+ KABI_RESERVE(3);
+ KABI_RESERVE(4);
+};
+
+struct backing_dev_info_kylin_v2401 {
+ u64 id;
+ struct rb_node rb_node; /* keyed by ->id */
+ struct list_head bdi_list;
+ unsigned long ra_pages; /* max readahead in PAGE_SIZE units */
+ unsigned long io_pages; /* max allowed IO size */
+ congested_fn *congested_fn; /* Function pointer if device is md/dm */
+ void *congested_data; /* Pointer to aux data for congested func */
+
+ const char *name;
+
+ struct kref refcnt; /* Reference counter for the structure */
+ unsigned int capabilities; /* Device capabilities */
+ unsigned int min_ratio;
+ unsigned int max_ratio, max_prop_frac;
+
+ /*
+ * Sum of avg_write_bw of wbs with dirty inodes. > 0 if there are
+ * any dirty wbs, which is depended upon by bdi_has_dirty().
+ */
+ atomic_long_t tot_write_bandwidth;
+
+ struct bdi_writeback wb; /* the root writeback info for this bdi */
+ struct list_head wb_list; /* list of all wbs */
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
+ struct rb_root cgwb_congested_tree; /* their congested states */
+ struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
+ struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
+#else
+ struct bdi_writeback_congested *wb_congested;
+#endif
+ wait_queue_head_t wb_waitq;
+
+ union {
+ struct rcu_device *rcu_dev;
+ struct device *dev;
+ };
+ struct device *owner;
+
+ struct timer_list laptop_mode_wb_timer;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debug_dir;
+ struct dentry *debug_stats;
+#endif
+
+ KABI_RESERVE(1)
+ KABI_RESERVE(2)
+ KABI_RESERVE(3)
+ KABI_RESERVE(4)
+};
+
static __always_inline void blk_fill_rwbs(char *rwbs, unsigned int op)
{
switch (op & REQ_OP_MASK) {
@@ -836,8 +1245,22 @@ u32 find_matching_wbt_5_keys(int major, int minor) {
SEC("kprobe/blk_mq_start_request")
int kprobe_blk_mq_start_request(struct pt_regs *regs)
{
- struct request_kylin *rq = (struct request_kylin *)PT_REGS_PARM1(regs);
+ struct request *rq = (struct request *)PT_REGS_PARM1(regs);
struct gendisk *curr_rq_disk = _(rq->rq_disk);
+
+ u32 key_version = 1;
+ struct version_map_num *version_map = bpf_map_lookup_elem(&version_res, &key_version);
+ if (version_map) {
+ if (version_map->num == VERSION_KY_V2401) {
+ struct request_kylin_v2401 *rq = (struct request_kylin_v2401 *)PT_REGS_PARM1(regs);
+ curr_rq_disk = _(rq->rq_disk);
+ } else if (version_map->num == VERSION_KY_V2101) {
+ struct request_kylin_v2101 *rq = (struct request_kylin_v2101 *)PT_REGS_PARM1(regs);
+ curr_rq_disk = _(rq->rq_disk);
+ }
+
+ }
+
int major = _(curr_rq_disk->major);
int first_minor = _(curr_rq_disk->first_minor);
unsigned int cmd_flags = _(rq->cmd_flags);
@@ -916,8 +1339,21 @@ int kprobe_blk_mq_start_request(struct pt_regs *regs)
SEC("kprobe/blk_mq_free_request")
int kprobe_blk_mq_free_request(struct pt_regs *regs)
{
- struct request_kylin *rq = (struct request_kylin *)PT_REGS_PARM1(regs);
+ struct request *rq = (struct request *)PT_REGS_PARM1(regs);
struct gendisk *curr_rq_disk = _(rq->rq_disk);
+
+ u32 key_version = 1;
+ struct version_map_num *version_map = bpf_map_lookup_elem(&version_res, &key_version);
+ if (version_map) {
+ if (version_map->num == VERSION_KY_V2401) {
+ struct request_kylin_v2401 *rq = (struct request_kylin_v2401 *)PT_REGS_PARM1(regs);
+ curr_rq_disk = _(rq->rq_disk);
+ } else if (version_map->num == VERSION_KY_V2101) {
+ struct request_kylin_v2101 *rq = (struct request_kylin_v2101 *)PT_REGS_PARM1(regs);
+ curr_rq_disk = _(rq->rq_disk);
+ }
+ }
+
int major = _(curr_rq_disk->major);
int first_minor = _(curr_rq_disk->first_minor);
unsigned int cmd_flags = _(rq->cmd_flags);
@@ -1357,14 +1793,25 @@ int kretprobe_wbt_wait(struct pt_regs *regs)
// start get_tag
SEC("kprobe/blk_mq_get_tag")
int kprobe_blk_mq_get_tag(struct pt_regs *regs)
-{
+{
u64 tagkey = bpf_get_current_task();
u64 value = (u64)PT_REGS_PARM1(regs);
(void)bpf_map_update_elem(&tag_args, &tagkey, &value, BPF_ANY);
struct blk_mq_alloc_data *bd= (struct blk_mq_alloc_data *)value;
- struct request_queue *q = _(bd->q);
- struct backing_dev_info *backing_dev_info = _(q->backing_dev_info);
+ struct request_queue *q = (struct request_queue *)_(bd->q);
+ struct backing_dev_info *backing_dev_info = (struct backing_dev_info *)_(q->backing_dev_info);
struct device *owner = _(backing_dev_info->owner);
+
+ u32 key_version = 1;
+ struct version_map_num *version_map = bpf_map_lookup_elem(&version_res, &key_version);
+ if (version_map) {
+ if (version_map->num == VERSION_KY_V2401) {
+ struct request_queue_kylin_v2401 *q = (struct request_queue_kylin_v2401 *)_(bd->q);
+ struct backing_dev_info_kylin_v2401 *backing_dev_info = (struct backing_dev_info_kylin_v2401 *)_(q->backing_dev_info);
+ owner = _(backing_dev_info->owner);
+ }
+ }
+
dev_t devt = _(owner->devt);
int major = MAJOR(devt);
int first_minor = MINOR(devt);
@@ -1391,11 +1838,14 @@ int kprobe_blk_mq_get_tag(struct pt_regs *regs)
init_io_counter(&zero, major, first_minor);
counterp = bpf_map_lookup_elem(&tag_map, &tagkey);
- if (counterp || major == 0)
+ if (counterp || major == 0) {
return 0;
+ }
+
long err = bpf_map_update_elem(&tag_map, &tagkey, &zero, BPF_NOEXIST);
- if (err)
+ if (err) {
return 0;
+ }
u64 curr_start_range = zero.start_time / THRESHOLD;
@@ -1434,7 +1884,6 @@ int kprobe_blk_mq_get_tag(struct pt_regs *regs)
__sync_fetch_and_add(&curr_data_time_range->count[key], 1);
}
}
-
return 0;
}
@@ -1449,12 +1898,23 @@ int kretprobe_blk_mq_get_tag(struct pt_regs *regs)
tagargs = (u64 *)bpf_map_lookup_elem(&tag_args, &tagkey);
if (tagargs == NULL) {
bpf_map_delete_elem(&tag_args, &tagkey);
- return 0;
+ return 0;
}
bd = (struct blk_mq_alloc_data *)(*tagargs);
struct request_queue *q = _(bd->q);
- struct backing_dev_info *backing_dev_info = _(q->backing_dev_info);
+ struct backing_dev_info *backing_dev_info = _(q->backing_dev_info);
struct device *owner = _(backing_dev_info->owner);
+
+ u32 key_version = 1;
+ struct version_map_num *version_map = bpf_map_lookup_elem(&version_res, &key_version);
+ if (version_map) {
+ if (version_map->num == VERSION_KY_V2401) {
+ struct request_queue_kylin_v2401 *q = (struct request_queue_kylin_v2401 *)_(bd->q);
+ struct backing_dev_info_kylin_v2401 *backing_dev_info = (struct backing_dev_info_kylin_v2401 *)_(q->backing_dev_info);
+ owner = _(backing_dev_info->owner);
+ }
+ }
+
dev_t devt = _(owner->devt);
int major = MAJOR(devt);
int first_minor = MINOR(devt);
@@ -1480,8 +1940,9 @@ int kretprobe_blk_mq_get_tag(struct pt_regs *regs)
counterp = bpf_map_lookup_elem(&tag_map, &tagkey);
- if (!counterp)
+ if (!counterp) {
return 0;
+ }
u64 duration = bpf_ktime_get_ns() - counterp->start_time;
u64 curr_start_range = counterp->start_time / THRESHOLD;
@@ -1542,4 +2003,3 @@ int kretprobe_blk_mq_get_tag(struct pt_regs *regs)
char LICENSE[] SEC("license") = "Dual BSD/GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;
-
diff --git a/src/c/ebpf_collector/ebpf_collector.c b/src/c/ebpf_collector/ebpf_collector.c
index 54a3a9e..dbb3e55 100644
--- a/src/c/ebpf_collector/ebpf_collector.c
+++ b/src/c/ebpf_collector/ebpf_collector.c
@@ -35,6 +35,7 @@
#define BIO_RES_2 (map_fd[11])
#define WBT_RES_2 (map_fd[12])
#define TAG_RES_2 (map_fd[13])
+#define VERSION_RES (map_fd[14])
#define BPF_FILE "/usr/lib/ebpf_collector.bpf.o"
#define MAX_LINE_LENGTH 1024
@@ -188,7 +189,7 @@ static int print_map_res(struct bpf_map *map_res, char *stage, int *map_size, in
int first_minor = counter.first_minor;
dev_t dev = makedev(major, first_minor);
char *device_name = find_device_name(dev);
- logMessage(LOG_LEVEL_DEBUG, "device_name: %s\n", device_name);
+ logMessage(LOG_LEVEL_DEBUG, "device_name: %s, stage: %s, io_type: %c\n", device_name, stage, io_type);
if (device_name && io_type) {
printf("%-7s %10llu %10llu %d %c %s\n",
stage,
@@ -222,6 +223,19 @@ int init_map(int *map_fd, const char *map_name, int *map_size, DeviceInfo *devic
return 0;
}
+int init_version_map(int *map_fd, const char *map_name, int os_num) {
+ struct version_map_num init_data = {0};
+ init_data.num = os_num;
+
+ u32 key = 1;
+ if (bpf_map_update_elem(map_fd, &key, &init_data, BPF_ANY) != 0) {
+ logMessage(LOG_LEVEL_ERROR, "Failed to initialize map %s at index %d\n", map_name);
+ return 1;
+ }
+
+ return 0;
+}
+
char *read_config_value(const char *file, const char *section, const char *key) {
FILE *fp = fopen(file, "r");
if (fp == NULL) {
@@ -317,6 +331,94 @@ int check_for_device(const char *device_name) {
return 0;
}
+typedef struct {
+ const char *version;
+ int value;
+} VersionMap;
+
+const VersionMap version_map[] = {
+ {"v2401", 1},
+ {"v2101", 2}
+ };
+
+char *get_minor_version(int index, char *buffer) {
+ char *version_info = NULL;
+ char* token = strtok(buffer, " ");
+ int count = 0;
+ while (token != NULL) {
+ token = strtok(NULL, " ");
+ count++;
+ if (count == 2) {
+ char* version = strtok(token, ".");
+ int dot_count = 0;
+ while (version != NULL) {
+ version = strtok(NULL, ".");
+ dot_count++;
+ if (dot_count == index) {
+ version_info = strdup(version);
+ break;
+ }
+ }
+ }
+ }
+ return version_info;
+}
+
+int get_os_version() {
+ FILE* file;
+ char* distribution = NULL;
+ char buffer[BUFFER_SIZE];
+
+ file = fopen(OS_RELEASE_FILE, "r");
+ if (file == NULL) {
+ logMessage(LOG_LEVEL_ERROR, "Failed to open release file: %s\n", OS_RELEASE_FILE);
+ return -1;
+ }
+
+ while (fgets(buffer, BUFFER_SIZE, file)) {
+ if (strncmp(buffer, "ID=", 3) == 0) {
+ distribution = strdup(buffer + (buffer[3] == '"' ? 4 : 3));
+ distribution[strcspn(distribution, "\"\n")] = '\0';
+ break;
+ }
+ }
+ fclose(file);
+
+ char* version_info = NULL;
+ int value = -1;
+
+ file = fopen(PROC_VERSION_FILE, "r");
+ if (file == NULL) {
+ logMessage(LOG_LEVEL_ERROR, "Failed to open version file: %s\n", PROC_VERSION_FILE);
+ free(distribution); return -1;
+ }
+
+ if (fgets(buffer, BUFFER_SIZE, file)) {
+ if (distribution != NULL && strcmp(distribution, "openEuler") == 0) {
+ free(distribution);
+ fclose(file); return 0;
+ } else if (distribution != NULL && strcmp(distribution, "kylin") == 0) {
+ version_info = get_minor_version(4, buffer);
+ if (!version_info) {
+ logMessage(LOG_LEVEL_ERROR, "get minor version failed.\n");
+ free(distribution);
+ fclose(file); return -1;
+ }
+ }
+ }
+ free(distribution);
+ fclose(file);
+
+ for (int i = 0; version_info != NULL && i < (int)(sizeof(version_map) / sizeof(version_map[0])); ++i) {
+ if (strcmp(version_map[i].version, version_info) == 0) {
+ value = version_map[i].value;
+ break;
+ }
+ }
+ free(version_info);
+ return value;
+}
+
int main(int argc, char **argv) {
struct partitions *partitions = NULL;
const struct partition *partition;
@@ -355,6 +457,12 @@ int main(int argc, char **argv) {
return err;
}
+ int os_num = get_os_version();
+ if (os_num < 0) {
+ logMessage(LOG_LEVEL_ERROR, "get os version failed.\n");
+ return 1;
+ }
+
snprintf(filename, sizeof(filename), BPF_FILE);
if (load_bpf_file(filename)) {
@@ -414,6 +522,11 @@ int main(int argc, char **argv) {
logMessage(LOG_LEVEL_ERROR, "tag_res_map failed.\n");
return 1;
}
+ if (init_version_map(VERSION_RES, "version_res_map", os_num) != 0) {
+ logMessage(LOG_LEVEL_ERROR, "version_res_map failed.\n");
+ return 1;
+ }
+
for (;;) {
diff --git a/src/c/ebpf_collector/ebpf_collector.h b/src/c/ebpf_collector/ebpf_collector.h
index fcebc93..904f8e4 100644
--- a/src/c/ebpf_collector/ebpf_collector.h
+++ b/src/c/ebpf_collector/ebpf_collector.h
@@ -32,6 +32,11 @@ typedef unsigned int u32;
#define REQ_OP_WRITE_SAME 7
#define MAP_SIZE 15
+#define OS_RELEASE_FILE "/etc/os-release"
+#define PROC_VERSION_FILE "/proc/version"
+#define BUFFER_SIZE 1024
+#define VERSION_LEN 20
+
enum stage_type {
BIO=0,
WBT,
@@ -73,5 +78,9 @@ struct time_range_io_count
u32 count[MAP_SIZE];
};
-#endif /* __EBPFCOLLECTOR_H */
+struct version_map_num
+{
+ int num;
+};
+#endif /* __EBPFCOLLECTOR_H */
--
2.27.0