diff --git a/build.sh b/build.sh index 079cef3cce1827a50f81f3bda3c62a00342aae5b..4a13be5dd0ffa719d568965afd41c186ae1d4ece 100644 --- a/build.sh +++ b/build.sh @@ -129,6 +129,12 @@ build_libkperf() if [ "${PYTHON}" = "true" ];then CMAKE_ARGS+=("-DPYTHON_WHL=${WHL}") fi + if [ "${BPF}" = "true" ];then + CMAKE_ARGS+=( + "-DCMAKE_INSTALL_RPATH=${INSTALL_PATH}lib;${THIRD_PARTY}local/bpf/usr/lib64" + "-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=TRUE" + ) + fi cmake "${CMAKE_ARGS[@]}" .. make -j ${cpu_core_num} make install diff --git a/pmu/bpf/evt_list_bpf.cpp b/pmu/bpf/evt_list_bpf.cpp index 607ebf7fab605e9c37d990831d5619b2c9577ec8..40524ff816f8f54d972593ef621c75c2238479e1 100644 --- a/pmu/bpf/evt_list_bpf.cpp +++ b/pmu/bpf/evt_list_bpf.cpp @@ -41,24 +41,25 @@ int KUNPENG_PMU::EvtListBpf::Init(const bool groupEnable, const std::shared_ptr< continue; } - int err = 0; - err = perfEvt->Init(groupEnable, -1, -1); + int err = perfEvt->Init(groupEnable, -1, -1); if (err != SUCCESS) { return err; } this->cpuCounterArray.emplace_back(perfEvt); } - for (unsigned int pid = 0; pid < numPid; pid++) { - PerfEvtPtr perfEvt = - std::make_shared<PerfCounterBpf>(-1, this->pidList[pid]->tid, this->pmuEvt.get(), procMap); - if (perfEvt == nullptr) { - continue; - } + this->allPids.resize(pidList.size()); + for (size_t i = 0; i < pidList.size(); i++) { + this->allPids[i] = pidList[i]->tid; + } - perfEvt->Init(groupEnable, -1, -1); // init pid, ignore the result of perf_event_open - this->pidCounterArray.emplace_back(perfEvt); + PerfEvtPtr perfEvt = std::make_shared<PerfCounterBpf>(-1, -1, this->pmuEvt.get(), procMap); + int err = std::dynamic_pointer_cast<PerfCounterBpf>(perfEvt)->InitPidForEvent(allPids); + if (err != SUCCESS) { + return err; } + + this->pidCounterArray.emplace_back(perfEvt); return SUCCESS; } @@ -115,52 +116,48 @@ int KUNPENG_PMU::EvtListBpf::Close() int KUNPENG_PMU::EvtListBpf::Read(EventData &eventData) { std::unique_lock<std::mutex> lg(mutex); + auto perfEvt = this->pidCounterArray[0]; + int err = perfEvt->BeginRead(); + if (err != 
SUCCESS) { + return err; + } - for (unsigned int pid = 0; pid < numPid; pid++) { - int err = this->pidCounterArray[pid]->BeginRead(); - if (err != SUCCESS) { - return err; - } + auto cpuTopo = this->cpuList[0].get(); + size_t oldSize = eventData.data.size(); + if (pmuEvt->cgroupName.empty()) { + err = std::dynamic_pointer_cast<PerfCounterBpf>( + this->pidCounterArray[0])->ReadBpfProcess(this->allPids, eventData.data); + } else { + err = std::dynamic_pointer_cast<PerfCounterBpf>( + this->pidCounterArray[0])->ReadBpfCgroup(eventData.data); + } + if (err != SUCCESS) { + return err; } - struct PmuEvtData* head = nullptr; - int row = 0; - auto cpuTopo = this->cpuList[row].get(); - for (unsigned int pid = 0; pid < numPid; pid++) { - auto cnt = eventData.data.size(); - int err = this->pidCounterArray[pid]->Read(eventData); - if (err != SUCCESS) { - return err; + const char* evtName = pmuEvt->name.c_str(); + uint64_t tsVal = this->ts; + for (size_t i = oldSize; i < eventData.data.size(); i++) { + auto& d = eventData.data[i]; + DBG_PRINT("evt: %s pid: %d cpu: %d\n", evtName, d.pid, d.cpu); + + d.cpuTopo = cpuTopo; + d.evt = evtName; + if (!d.comm) { + auto it = procMap.find(d.tid); + if (it != procMap.end()) { + d.comm = it->second->comm; + } } - if (eventData.data.size() - cnt) { - DBG_PRINT("evt: %s pid: %d cpu: %d samples num: %d\n", pmuEvt->name.c_str(), pidList[pid]->pid, - cpuTopo->coreId, eventData.data.size() - cnt); + if (d.ts == 0) { + d.ts = tsVal; } - // Fill event name and cpu topology. 
- FillFields(cnt, eventData.data.size(), cpuTopo, pidList[pid].get(), eventData.data); } - for (unsigned int pid = 0; pid < numPid; pid++) { - int err = this->pidCounterArray[pid]->EndRead(); - if (err != SUCCESS) { - return err; - } + err = perfEvt->EndRead(); + if (err != SUCCESS) { + return err; } return SUCCESS; } - -void KUNPENG_PMU::EvtListBpf::FillFields( - size_t start, size_t end, CpuTopology* cpuTopo, ProcTopology* procTopo, vector& data) -{ - for (auto i = start; i < end; ++i) { - data[i].cpuTopo = cpuTopo; - data[i].evt = this->pmuEvt->name.c_str(); - if (data[i].comm == nullptr) { - data[i].comm = procTopo->comm; - } - if (data[i].ts == 0) { - data[i].ts = this->ts; - } - } -} \ No newline at end of file diff --git a/pmu/bpf/evt_list_bpf.h b/pmu/bpf/evt_list_bpf.h index 47ff1f9a8c8badd797fd142cdb14cab3cf1914b5..74cd8bce026e8f832219c2986444a90d952438ed 100644 --- a/pmu/bpf/evt_list_bpf.h +++ b/pmu/bpf/evt_list_bpf.h @@ -53,6 +53,7 @@ public: void RemoveInitErr() override {}; private: + std::vector allPids; std::vector> cpuCounterArray; std::vector> pidCounterArray; int CollectorTaskArrayDoTask(std::vector& taskArray, int task); diff --git a/pmu/bpf/perf_counter_bpf.cpp b/pmu/bpf/perf_counter_bpf.cpp index 9e844d9c7d70c73bc934f01f868fa82879aec379..4b705b4a72bed0aa380f27045c76139775079e59 100644 --- a/pmu/bpf/perf_counter_bpf.cpp +++ b/pmu/bpf/perf_counter_bpf.cpp @@ -40,13 +40,14 @@ using namespace std; using namespace pcerr; #define MAX_ENTITES 102400 +#define MAX_CPU_LIMIT 1024 -static map counterMap; // key: evt name, value: bpf obj -static struct sched_cgroup_bpf *cgrpCounter = nullptr; // one bpf obj in cgroup mode +static unordered_map counterMap; // key: evt name, value: bpf obj +static struct sched_cgroup_bpf *cgrpCounter = nullptr; // one bpf obj in cgroup mode static std::unordered_map evtDataMap; -static set evtKeys; // updated fds of cgroup -static set readCgroups; -static set triggerdEvt; +static unordered_set evtKeys; // updated fds of 
cgroup +static unordered_set<std::string> readCgroups; +static unordered_set<std::string> triggerdEvt; static int evtIdx = 0; static int cgrpProgFd = 0; @@ -75,9 +76,18 @@ int KUNPENG_PMU::PerfCounterBpf::EndRead() return SUCCESS; } -int KUNPENG_PMU::PerfCounterBpf::ReadBpfProcess(std::vector<PmuData> &data) +inline int CachedCpuCount() { - const unsigned cpuNums = MAX_CPU_NUM; + static int cached = []{ + long n = sysconf(_SC_NPROCESSORS_CONF); + return (n > 0) ? (int)n : 1; + }(); + return cached > MAX_CPU_LIMIT ? MAX_CPU_LIMIT : cached; +} + +int KUNPENG_PMU::PerfCounterBpf::ReadBpfProcess(const std::vector<int>& pids, std::vector<PmuData>& data) +{ + const unsigned cpuNums = CachedCpuCount(); auto obj = counterMap[this->evt->name]; // must execute sched_switch when each read operation. @@ -91,44 +101,34 @@ int KUNPENG_PMU::PerfCounterBpf::ReadBpfProcess(std::vector<PmuData> &data) } triggerdEvt.insert(this->evt->name); } - - // read the pmu count of this pid in each cpu core - struct bpf_perf_event_value values[cpuNums]; - int err = bpf_map__lookup_elem( - obj->maps.accum_readings, &this->pid, sizeof(__u32), values, sizeof(bpf_perf_event_value) * cpuNums, BPF_ANY); - if (err) { - New(LIBPERF_ERR_BPF_ACT_FAILED, "failed to lookup counter map accum_readings. 
Error: " - + string(strerror(-err)) + " pid " + to_string(this->pid)); - return LIBPERF_ERR_BPF_ACT_FAILED; - } + static const std::vector zeros(cpuNums, bpf_perf_event_value{}); + for (int tid : pids) { + std::vector values(cpuNums, bpf_perf_event_value{}); + if (bpf_map_lookup_elem(bpf_map__fd(obj->maps.accum_readings), &tid, values.data())) { + continue; + } - // convert pmu count to PmuData - int processId = 0; - auto findProc = procMap.find(this->pid); - if (findProc != procMap.end()) { - processId = findProc->second->pid; - } + int processId = 0; + auto it = procMap.find(tid); + if (it != procMap.end()) { + processId = it->second->pid; + } + for (int cpu = 0; cpu < cpuNums; ++cpu) { + if (values[cpu].counter == 0) continue; + data.emplace_back(PmuData{ + .pid = processId, + .tid = tid, + .cpu = cpu, + .count = values[cpu].counter, + .countPercent = values[cpu].enabled ? + (double)values[cpu].running / values[cpu].enabled : 0.0, + }); + } - for (int i = 0; i < cpuNums; i++) { - data.emplace_back(PmuData{0}); - auto ¤t = data.back(); - current.count = values[i].counter; - current.countPercent = values[i].running / values[i].enabled; - current.cpu = i; - current.tid = this->pid; - current.pid = processId; + bpf_map_update_elem(bpf_map__fd(obj->maps.accum_readings), &tid, zeros.data(), BPF_EXIST); } - // reset pmu count in bpf to ensure that the value read from pmu is delta (after last read/open) - memset(values, 0, MAX_CPU_NUM * sizeof(bpf_perf_event_value)); - err = bpf_map__update_elem( - obj->maps.accum_readings, &pid, sizeof(__u32), values, sizeof(bpf_perf_event_value) * cpuNums, BPF_ANY); - if (err) { - New(LIBPERF_ERR_BPF_ACT_FAILED, "failed to update counter map accum_readings. 
Error: " - + string(strerror(-err)) + " pid " + to_string(this->pid)); - return LIBPERF_ERR_BPF_ACT_FAILED; - } return SUCCESS; } @@ -140,7 +140,7 @@ int KUNPENG_PMU::PerfCounterBpf::ReadBpfCgroup(std::vector &data) } readCgroups.insert(cgrpName); - for (int i=0;i &data) return SUCCESS; } - for (int i = 0; i < cpuNums; i++) { - data.emplace_back(PmuData{0}); - auto ¤t = data.back(); - current.count = values[i].counter; - current.countPercent = values[i].running / values[i].enabled; - current.cpu = i; - current.tid = this->pid; - current.cgroupName = this->evt->cgroupName.c_str(); + for (int cpu = 0; cpu < cpuNums; ++cpu) { + data.emplace_back(PmuData{ + .tid = this->pid, + .cpu = cpu, + .count = values[cpu].counter, + .countPercent = values[cpu].enabled ? + (double)values[cpu].running / values[cpu].enabled : 0.0, + .cgroupName = this->evt->cgroupName.c_str(), + }); } memset(values, 0, cpuNums * sizeof(bpf_perf_event_value)); @@ -180,11 +181,7 @@ int KUNPENG_PMU::PerfCounterBpf::ReadBpfCgroup(std::vector &data) int KUNPENG_PMU::PerfCounterBpf::Read(EventData &eventData) { - if (!evt->cgroupName.empty()) { - return ReadBpfCgroup(eventData.data); - } else { - return ReadBpfProcess(eventData.data); - } + return SUCCESS; } static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args) @@ -192,13 +189,9 @@ static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va return vfprintf(stderr, format, args); } -int KUNPENG_PMU::PerfCounterBpf::InitPidForEvent() +int KUNPENG_PMU::PerfCounterBpf::InitPidForEvent(const std::vector& pids) { - if (this->pid == -1) { - return SUCCESS; - } - - if (evtDataMap[this->evt->name].pids.find(this->pid) != evtDataMap[this->evt->name].pids.end()) { + if (!this->evt->cgroupName.empty()) { return SUCCESS; } @@ -206,26 +199,53 @@ int KUNPENG_PMU::PerfCounterBpf::InitPidForEvent() if (findObj == counterMap.end()) { return -1; } + auto obj = findObj->second; + int map_fd = 
bpf_map__fd(obj->maps.accum_readings); + // initialize the cumulative pmu count for this pid, only once + int cpu_num = CachedCpuCount(); + + std::vector<__u32> keys; + keys.reserve(pids.size()); + for (int pid : pids) { + keys.push_back(static_cast<__u32>(pid)); + } - // initialize the cumulative pmu count for this pid - struct bpf_perf_event_value evtVal[MAX_CPU_NUM]; + std::vector<bpf_perf_event_value> values(keys.size() * cpu_num, bpf_perf_event_value{}); + __u32 cnt = static_cast<__u32>(keys.size()); + struct bpf_map_batch_opts opts { + .sz = sizeof(struct bpf_map_batch_opts), + .flags = BPF_NOEXIST, + }; - memset(evtVal, 0, MAX_CPU_NUM * sizeof(bpf_perf_event_value)); - int err = bpf_map__update_elem(findObj->second->maps.accum_readings, &pid, sizeof(__u32), evtVal, - sizeof(bpf_perf_event_value) * MAX_CPU_NUM, BPF_NOEXIST); + // initialize the cumulative pmu count for this pid + int err = bpf_map_update_batch(map_fd, keys.data(), values.data(), &cnt, &opts); if (err) { - New(LIBPERF_ERR_BPF_ACT_FAILED, "failed to update counter map accum_readings. Error: " + err); - return LIBPERF_ERR_BPF_ACT_FAILED; + // batch error, rollback to update elem one by one + for (auto pid : keys) { + std::vector<bpf_perf_event_value> zeroVals(cpu_num, bpf_perf_event_value{}); + int err = bpf_map__update_elem(obj->maps.accum_readings, + &pid, sizeof(__u32), + zeroVals.data(), + sizeof(bpf_perf_event_value) * cpu_num, + BPF_NOEXIST); + if (err && err != -EEXIST) { + New(LIBPERF_ERR_BPF_ACT_FAILED, "failed to update counter map accum_readings. Error: " + std::to_string(err)); + return LIBPERF_ERR_BPF_ACT_FAILED; + } + } } // initialize the filter, build the map relationship of pid and accum_key - err = bpf_map__update_elem(findObj->second->maps.filter, &pid, sizeof(__u32), &pid, sizeof(__u32), BPF_NOEXIST); - if (err) { - New(LIBPERF_ERR_BPF_ACT_FAILED, "failed to update counter map filter. 
Error: " + err); - return LIBPERF_ERR_BPF_ACT_FAILED; + for (auto pid : keys) { + int err = bpf_map__update_elem(obj->maps.filter,&pid, sizeof(__u32), &pid, sizeof(__u32), BPF_NOEXIST); + if (err && err != -EEXIST) { + New(LIBPERF_ERR_BPF_ACT_FAILED,"failed to update counter map filter. Error: " + std::to_string(err)); + return LIBPERF_ERR_BPF_ACT_FAILED; + } + DBG_PRINT("Init pid %d For eventId: %s\n", pid, this->evt->name.c_str()); + evtDataMap[this->evt->name].pids.insert(pid); } - DBG_PRINT("InitPidForEvent: %d\n", pid); - evtDataMap[this->evt->name].pids.insert(this->pid); + return SUCCESS; } @@ -233,8 +253,9 @@ int KUNPENG_PMU::PerfCounterBpf::InitBpfObj() { int err; struct sched_counter_bpf *obj; - auto findObj = counterMap.find(evt->name); - if (findObj == counterMap.end()) { + + auto findObj = counterMap[this->evt->name]; + if (!findObj) { // initialize the bpf obj obj = sched_counter_bpf__open(); if (!obj) { @@ -251,7 +272,7 @@ int KUNPENG_PMU::PerfCounterBpf::InitBpfObj() New(LIBPERF_ERR_BPF_ACT_FAILED, "failed to set max entries of counter map: prev_readings"); return LIBPERF_ERR_BPF_ACT_FAILED; } - err = bpf_map__set_max_entries(obj->maps.accum_readings, 1024); + err = bpf_map__set_max_entries(obj->maps.accum_readings, this->procMap.size()); if (err) { New(LIBPERF_ERR_BPF_ACT_FAILED, "failed to set max entries of counter map: accum_readings"); return LIBPERF_ERR_BPF_ACT_FAILED; @@ -275,17 +296,13 @@ int KUNPENG_PMU::PerfCounterBpf::InitBpfObj() } counterMap[this->evt->name] = obj; - err = InitPidForEvent(); - if (err == LIBPERF_ERR_BPF_ACT_FAILED) { - return err; - } + // get the fd of bpf prog, trigger trace function(sched_switch) of bpf in read int progFd = bpf_program__fd(obj->progs.on_switch); - evtDataMap[this->evt->name].bpfFd = progFd; DBG_PRINT("create bpf obj for evt %s prog fd %d\n", evt->name.c_str(), progFd); } else { - obj = counterMap[this->evt->name]; + obj = findObj; } // initialize the pmu count, put fd of pmu into value @@ -414,29 
+431,20 @@ int KUNPENG_PMU::PerfCounterBpf::InitBpfCgroupObj() int KUNPENG_PMU::PerfCounterBpf::Init(const bool groupEnable, const int groupFd, const int resetOutputFd) { - int err = InitPidForEvent(); - if (err == LIBPERF_ERR_BPF_ACT_FAILED) { - return err; - } auto findCpuMap = evtDataMap.find(this->evt->name); auto findCgroup = cgroupIdxMap.find(this->evt->cgroupName); if (findCpuMap != evtDataMap.end() && findCpuMap->second.cpus.count(this->cpu) && findCgroup != cgroupIdxMap.end()) { return SUCCESS; } - + if (findCpuMap == evtDataMap.end() || !findCpuMap->second.cpus.count(this->cpu)) { - err = this->MapPerfAttr(groupEnable, groupFd); + int err = this->MapPerfAttr(groupEnable, groupFd); if (err != SUCCESS) { return err; } } - if (this->evt->cgroupName.empty()) { - err = InitBpfObj(); - } else { - err = InitBpfCgroupObj(); - } - return err; + return this->evt->cgroupName.empty() ? InitBpfObj() : InitBpfCgroupObj(); } int KUNPENG_PMU::PerfCounterBpf::MapPerfAttr(const bool groupEnable, const int groupFd) diff --git a/pmu/bpf/perf_counter_bpf.h b/pmu/bpf/perf_counter_bpf.h index 1cf1c23004c106b889090a4d1de336890915d182..55e95c0dc4150803b7d2f668e202cec4cde53ff6 100644 --- a/pmu/bpf/perf_counter_bpf.h +++ b/pmu/bpf/perf_counter_bpf.h @@ -18,6 +18,7 @@ #include #include #include +#include #include "evt.h" #include "pmu_event.h" #include "perf_counter.h" @@ -27,8 +28,8 @@ struct BpfEvent { int bpfFd = -1; int eventId = -1; - std::set cpus; - std::set pids; + std::unordered_set cpus; + std::unordered_set pids; }; namespace KUNPENG_PMU { @@ -47,13 +48,14 @@ namespace KUNPENG_PMU { int BeginRead(); int EndRead(); + int InitPidForEvent(const std::vector& pids); + int ReadBpfProcess(const std::vector& pids, std::vector &data); + int ReadBpfCgroup(std::vector &data); + private: int InitBpfObj(); int InitBpfCgroupObj(); - int InitPidForEvent(); - int ReadBpfProcess(std::vector &data); - int ReadBpfCgroup(std::vector &data); - std::map cgroupIdxMap; // key: cgroup name, 
value: sequential number + std::unordered_map cgroupIdxMap; // key: cgroup name, value: sequential number }; } // namespace KUNPENG_PMU #endif diff --git a/pmu/bpf/sched_cgroup.bpf.c b/pmu/bpf/sched_cgroup.bpf.c index 79442f6fcc5acb4e0379c75df3ca8ba87ebd32de..ff2afc08e45823c18e86f0456133235ad8cf2ef6 100644 --- a/pmu/bpf/sched_cgroup.bpf.c +++ b/pmu/bpf/sched_cgroup.bpf.c @@ -52,32 +52,48 @@ struct { const volatile __u32 num_events = 1; const volatile __u32 num_cpus = 1; +// new kernel cgroup definition +struct cgroup___new { + int level; + struct cgroup *ancestors[]; +} __attribute__((preserve_access_index)); + +// old kernel cgroup definition +struct cgroup___old { + int level; + u64 ancestor_ids[]; +} __attribute__((preserve_access_index)); + +static inline __u64 get_cgroup_ancestor_id(struct cgroup *cgrp, int level) +{ + // recast pointer to capture new type for compiler + struct cgroup___new *cgrp_new = (void *)cgrp; + if (bpf_core_field_exists(cgrp_new->ancestors)) { + return BPF_CORE_READ(cgrp_new, ancestors[level], kn, id); + } else { + // recast pointer to capture old type for compiler + struct cgroup___old *cgrp_old = (void *)cgrp; + return BPF_CORE_READ(cgrp_old, ancestor_ids[level]); + } +} static inline int get_cgroup_idx(__u32 *cgrps, int size) { struct task_struct *p = (void *)bpf_get_current_task(); struct cgroup *cgrp; - register int i = 0; __u32 *elem; int level; - int cnt; + int cnt = 0; cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_event_cgrp_id], cgroup); - level = BPF_CORE_READ(cgrp, level); + if (!cgrp) { + return 0; + } - for (cnt = 0; i < MAX_LEVELS; i++) { - __u64 cgrp_id; + level = BPF_CORE_READ(cgrp, level); - if (i > level) { - break; - } + for (int i = 0; i <= level && i < MAX_LEVELS; i++) { + __u64 cgrp_id = get_cgroup_ancestor_id(cgrp, i); - // convert cgroup-id to a map index - if (bpf_core_field_exists(cgrp->ancestor_ids)) { - cgrp_id = BPF_CORE_READ(cgrp, ancestor_ids[i]); - } else { - bpf_printk("cannot get ancestor_ids, this field 
not in struct cgroup"); - return 0; - } elem = bpf_map_lookup_elem(&cgrp_idx, &cgrp_id); if (!elem) { continue; diff --git a/pmu/pmu.cpp b/pmu/pmu.cpp index 3825a6cf57955062b509f9b47383ed69211d945a..4637d75396857c71f620642629af8d9cd83b0978 100644 --- a/pmu/pmu.cpp +++ b/pmu/pmu.cpp @@ -362,6 +362,15 @@ static int CheckBpfMode(enum PmuTaskType collectType, struct PmuAttr *attr) New(LIBPERF_ERR_INVALID_BPF_PARAM, "Bpf mode doesn't support event group now"); return LIBPERF_ERR_INVALID_BPF_PARAM; } + + set events; + for (int i = 0; i < attr->numEvt; i++) { + std::string evt(attr->evtList[i]); + if (!events.insert(evt).second) { + New(LIBPERF_ERR_INVALID_BPF_PARAM, "Bpf mode doesn't support duplicate event names"); + return LIBPERF_ERR_INVALID_BPF_PARAM; + } + } return SUCCESS; } diff --git a/util/process_map.cpp b/util/process_map.cpp index 538617cf03d53208534b86c6792e31e6b44b3974..bb10b5ae80ca9bae403f562858b47e302f786c1e 100644 --- a/util/process_map.cpp +++ b/util/process_map.cpp @@ -53,66 +53,53 @@ int GetTgid(pid_t pid) return -1; } // Get tgid from /proc//status. - std::string filePath = "/proc/" + std::to_string(pid) + "/status"; - std::string realPath = GetRealPath(filePath); - if (!IsValidPath(realPath)) { + char path[64]; + snprintf(path, sizeof(path), "/proc/%d/status", pid); + FILE* f = fopen(path, "r"); + if (!f) { return -1; } - std::ifstream statusFile(realPath); - if (!statusFile.is_open()) { - return -1; - } - string token; - bool foundTgid = false; - while (!statusFile.eof()) { - if (!statusFile.is_open()) { - return -1; - } - statusFile >> token; - if (statusFile.bad()) { - // The file may be successfully opened before while loop, - // but disappear before reading stream. 
- return -1; - } - if (token == "Tgid:") { - foundTgid = true; - continue; - } - if (foundTgid) { - return stoi(token); + + char line[256]; + while (fgets(line, sizeof(line), f)) { + if (strncmp(line, "Tgid:", 5) == 0) { + char* endptr; + long tgid = strtol(line + 5, &endptr, 10); + fclose(f); + if (endptr == line + 5) { + return -1; + } + return static_cast(tgid); } } + // The file may be successfully opened before while loop, + // but disappear before reading stream. + fclose(f); return -1; } char *GetComm(pid_t pid) { - std::string commName; + static thread_local char buffer[PATH_MAX]; if (pid == -1) { - commName = "system"; - char *comm = static_cast(malloc(commName.length() + 1)); - if (comm == nullptr) { - return nullptr; - } - strcpy(comm, commName.c_str()); - return comm; - } - std::string filePath = "/proc/" + std::to_string(pid) + "/comm"; - std::string realPath = GetRealPath(filePath); - if (!IsValidPath(realPath)) { - return nullptr; + return strdup("system"); } - std::ifstream commFile(realPath); - if (!commFile.is_open()) { + + char path[64]; + snprintf(path, sizeof(path), "/proc/%d/comm", pid); + FILE* f = fopen(path, "r"); + if (!f) { return nullptr; } - commFile >> commName; - char *comm = static_cast(malloc(commName.length() + 1)); - if (comm == nullptr) { + + if (!fgets(buffer, sizeof(buffer), f)) { + fclose(f); return nullptr; } - strcpy(comm, commName.c_str()); - return comm; + fclose(f); + + buffer[strcspn(buffer, "\n")] = '\0'; + return strdup(buffer); } struct ProcTopology *GetProcTopology(pid_t pid)