1. Background
In an earlier post, 调度时延的观测 (杰克崔, CSDN博客), we covered several ways to monitor scheduling latency. One relies on the nodes the system already exposes, but those cannot capture the latency of every single scheduling instance. Another modifies the kernel and adds logic right where the kernel computes the scheduling delay; that added logic can also take the form of a new tracepoint (for how to add a tracepoint and register a callback on it, see 内核tracepoint的注册回调及添加的方法 - CSDN博客).
This post shows how to use the kernel's existing tracepoints to monitor scheduling latency "efficiently".
The word "efficiently" deserves emphasis. If performance did not matter, we could simply register probes on the sched_wakeup and sched_switch tracepoints, record each pid's wakeup time, and subtract it from the switch-in time to get the scheduling delay. But that approach needs a large table holding the wakeup time of every thread, plus the insert and lookup logic that goes with it, and these become expensive when there are many threads. It also needs extra bookkeeping, checks, and arithmetic for the case where a task is scheduled out because its timeslice expired and is later scheduled back in, because no sched_wakeup event is ever emitted for that case. The logic quickly gets complicated.
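For contrast, here is a minimal sketch of that naive wakeup/switch bookkeeping. It is illustrative only and not from the original post; the fixed-size table, the missing locking, and the lack of collision handling are all simplifications:

/* Illustrative only: per-pid wakeup timestamps for the naive approach.
 * A real implementation needs a properly sized, locked (or RCU) table,
 * eviction of dead pids, and extra handling for tasks that were preempted
 * and re-queued without ever emitting a sched_wakeup event. */
#define WAKE_TABLE_SIZE 4096
static u64 wakeup_ns[WAKE_TABLE_SIZE];  /* indexed by pid % WAKE_TABLE_SIZE */

static void naive_cb_sched_wakeup(void *data, struct task_struct *p)
{
    wakeup_ns[p->pid % WAKE_TABLE_SIZE] = local_clock();  /* record wakeup time */
}

static void naive_cb_sched_switch(void *data, bool preempt,
                                  struct task_struct *prev,
                                  struct task_struct *next,
                                  unsigned int prev_state)
{
    u64 t = wakeup_ns[next->pid % WAKE_TABLE_SIZE];

    if (t)  /* delay = switch-in time - wakeup time */
        pr_info("pid[%d] sched_delay[%llu]\n", next->pid, local_clock() - t);
    wakeup_ns[next->pid % WAKE_TABLE_SIZE] = 0;
}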
Section 2 first explains the design principle, then presents a small experiment confirming that it works. Finally, with a per-cpu variable we can monitor every single scheduling-latency instance.
2. Design Principle and Code Implementation
2.1 Design principle
When a task switch happens, two scheduler tracepoints fire in sequence: sched_switch and then sched_stat_runtime. In between, sched_info_arrive is called to compute the scheduling delay of the next task chosen at sched_switch, as shown below:
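The figure in the original post shows the kernel source at this spot. Paraphrased from kernel/sched/stats.h (recent kernels; details vary slightly by version), sched_info_arrive is roughly:

/* Paraphrased from kernel/sched/stats.h; not the author's code. */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
    unsigned long long now, delta = 0;

    if (!t->sched_info.last_queued)
        return;

    now = rq_clock(rq);
    delta = now - t->sched_info.last_queued;   /* this instance's scheduling delay */
    t->sched_info.last_queued = 0;
    t->sched_info.run_delay += delta;          /* accumulate into the task's total delay */
    t->sched_info.last_arrival = now;
    t->sched_info.pcount++;

    rq_sched_info_arrive(rq, delta);
}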
In that code, delta is the scheduling delay of this single instance, while run_delay accumulates the task's total scheduling delay.
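For reference, these are the fields involved (from struct sched_info in include/linux/sched.h, available when CONFIG_SCHED_INFO is set; the comments paraphrase the kernel's own):

struct sched_info {
#ifdef CONFIG_SCHED_INFO
    /* Cumulative counters: */
    unsigned long       pcount;        /* # of times run on this CPU */
    unsigned long long  run_delay;     /* time spent waiting on a runqueue */
    /* Timestamps: */
    unsigned long long  last_arrival;  /* when we last ran on a CPU */
    unsigned long long  last_queued;   /* when we were last queued to run */
#endif
};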
So at sched_switch we can record next's sched_info.run_delay before this instance's delay has been computed and accumulated into it; then, right after sched_info_arrive has run, when sched_stat_runtime fires we subtract the saved value from the now-updated sched_info.run_delay, and the difference is exactly this single instance's scheduling delay. This works because of the call order in __schedule(): trace_sched_switch() fires before context_switch() calls prepare_task_switch() -> sched_info_switch() -> sched_info_arrive(), so the snapshot taken at sched_switch does not yet include this instance's delay, while the value read at sched_stat_runtime (emitted from update_curr() once next is running) already does.
2.2 Experimental verification
For the verification we only print from one core (we picked CPU 10), so the dmesg output is not interleaved with other cores and is easy to check.
The method: print the single-instance scheduling delay computed by the scheme above, and at the same time print the single-instance delay reported by the custom sched_delay tracepoint introduced in 调度时延的观测 (CSDN博客), then compare the two numbers. If they match, we can clearly drop that extra in-kernel sched_delay tracepoint and achieve efficient per-instance latency monitoring with the native tracepoints alone.
The core verification code is as follows:
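The verification snippet appears only as an image in the original post; in essence it is the sched_stat_runtime callback from section 2.3 with the print limited to CPU 10. A sketch of that variant, assuming the same per-cpu bookkeeping defined in the full listing below:

/* Verification variant: identical to the section 2.3 callback, but only
 * report events on CPU 10 so the dmesg output stays readable. */
static void cb_sched_stat_runtime(void *data, struct task_struct *curr,
                                  u64 runtime, u64 vruntime)
{
    sched_delay_percpu *pd = this_cpu_ptr(&_sched_delay_percpu);
    u64 delta;

    if (smp_processor_id() != 10)  /* verification: look at one core only */
        return;
    if (pd->last_pid == 0 || pd->last_sum_delay == -1ull || pd->last_pid != curr->pid)
        return;

    delta = curr->sched_info.run_delay - pd->last_sum_delay;
    printk("comm[%s]pid[%d]sched_delay[%llu]\n", curr->comm, curr->pid, delta);
    pd->last_pid = 0;
}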
As the figure below shows, the two values are identical:
2.3 Monitoring single-instance scheduling latency with a per-cpu variable
The core of the implementation is the per-cpu bookkeeping structure and the two tracepoint callbacks in the listing below.
The complete code follows (for verification, #define SCHED_DELAY_REGISTER can be enabled so the module also records the custom tracepoint's delays and the two can be compared, confirming the functionality end to end):
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/ctype.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/errno.h>
#include <linux/stddef.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/init.h>
#include <asm/atomic.h>
#include <trace/events/workqueue.h>
#include <linux/sched/clock.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/tracepoint.h>
#include <trace/events/osmonitor.h>
#include <trace/events/sched.h>
#include <trace/events/irq.h>
#include <trace/events/kmem.h>
#include <linux/ptrace.h>
#include <asm/processor.h>
#include <linux/sched/task_stack.h>
#include <linux/nmi.h>
#include <asm/apic.h>
#include <linux/version.h>
#include <linux/sched/mm.h>
#include <linux/hos_kernel_kerr.h> /* from the author's patched kernel; only needed for the HOS_KERNEL_KLOG_* macros below */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("zhaoxin");
MODULE_DESCRIPTION("Monitor sched delay when not add new tracepoint.");
MODULE_VERSION("1.0");
struct kern_tracepoint {
    void *callback;          /* probe function to attach to the tracepoint */
    struct tracepoint *ptr;  /* the kernel tracepoint, looked up by name */
    bool bregister;          /* whether the probe is currently registered */
};
static void clear_kern_tracepoint(struct kern_tracepoint *tp)
{
    if (tp->bregister) {
        tracepoint_probe_unregister(tp->ptr, tp->callback, NULL);
    }
}
#define INIT_KERN_TRACEPOINT(tracepoint_name) \
    static struct kern_tracepoint mykern_##tracepoint_name = {.callback = NULL, .ptr = NULL, .bregister = false};

#define TRACEPOINT_CHECK_AND_SET(tracepoint_name)                                              \
    static void tracepoint_name##_tracepoint_check_and_set(struct tracepoint *tp, void *priv)  \
    {                                                                                          \
        if (!strcmp(#tracepoint_name, tp->name)) {                                             \
            ((struct kern_tracepoint *)priv)->ptr = tp;                                        \
            return;                                                                            \
        }                                                                                      \
    }
INIT_KERN_TRACEPOINT(sched_switch)
TRACEPOINT_CHECK_AND_SET(sched_switch)
INIT_KERN_TRACEPOINT(sched_stat_runtime)
TRACEPOINT_CHECK_AND_SET(sched_stat_runtime)
//#define SCHED_DELAY_REGISTER  /* enable to also hook the custom sched_delay tracepoint and cross-check the results */
#ifdef SCHED_DELAY_REGISTER
INIT_KERN_TRACEPOINT(sched_delay)
TRACEPOINT_CHECK_AND_SET(sched_delay)
#endif
/* Per-cpu bookkeeping: the pid switched in last on this cpu and a snapshot of its
 * sched_info.run_delay taken at sched_switch time, before sched_info_arrive adds
 * this instance's delay to it. */
typedef struct sched_delay_percpu {
    int last_pid;
    u64 last_sum_delay;
} sched_delay_percpu;

#define SCHED_DELAY_PERCPU_INIT  \
    ((sched_delay_percpu){       \
        .last_pid = 0,           \
        .last_sum_delay = -1ull, \
    })

DEFINE_PER_CPU(sched_delay_percpu, _sched_delay_percpu) = SCHED_DELAY_PERCPU_INIT;
static void cb_sched_switch(void *i_data, bool i_preempt,
                            struct task_struct *i_prev,
                            struct task_struct *i_next,
                            unsigned int i_prev_state)
{
    sched_delay_percpu *pdelaypercpu = this_cpu_ptr(&_sched_delay_percpu);

    /* Snapshot next's accumulated run_delay before sched_info_arrive() adds
     * this instance's delay to it. */
    pdelaypercpu->last_sum_delay = i_next->sched_info.run_delay;
    pdelaypercpu->last_pid = i_next->pid;
}
#define DELAY_THRESHOLD_NS 1000000ull  /* only report single delays above 1 ms */
/* Note: the prototype must match this kernel's sched_stat_runtime tracepoint;
 * some newer kernels dropped the vruntime argument. */
static void cb_sched_stat_runtime(void *i_data, struct task_struct *i_curr,
                                  u64 i_runtime, u64 i_vruntime)
{
    sched_delay_percpu *pdelaypercpu = this_cpu_ptr(&_sched_delay_percpu);
    u64 delay_delta;

    /* Only meaningful right after a switch-in of the same pid on this cpu. */
    if (pdelaypercpu->last_pid == 0 || pdelaypercpu->last_sum_delay == -1ull
        || pdelaypercpu->last_pid != i_curr->pid) {
        return;
    }

    /* run_delay now includes this instance's delay; subtract the snapshot. */
    delay_delta = i_curr->sched_info.run_delay - pdelaypercpu->last_sum_delay;
    if (delay_delta > DELAY_THRESHOLD_NS) {
        printk("comm[%s]pid[%d]tgid[%d]sched_delay[%llu]\n",
               i_curr->comm, i_curr->pid, i_curr->tgid, delay_delta);
    }
    pdelaypercpu->last_pid = 0;
}
#ifdef SCHED_DELAY_REGISTER
/* Callback on the custom sched_delay tracepoint (added in the earlier post),
 * used only to cross-check the values computed above. */
static void cb_sched_delay(void *i_data, struct task_struct *i_next,
                           unsigned long long i_delta,
                           unsigned long long i_run_delay,
                           unsigned long i_pcount)
{
    if (i_delta > DELAY_THRESHOLD_NS) {
        printk("cb_sched_delay:comm[%s]pid[%d]tgid[%d]sched_delay[%llu]\n",
               i_next->comm, i_next->pid, i_next->tgid, i_delta);
    }
}
#endif
static int __init testscheddelay_init(void)
{
    mykern_sched_switch.callback = cb_sched_switch;
    for_each_kernel_tracepoint(sched_switch_tracepoint_check_and_set, &mykern_sched_switch);
    if (!mykern_sched_switch.ptr) {
        printk(KERN_ERR "mykern_sched_switch register failed!\n");
        return -ENOENT;
    }
    printk(KERN_INFO "mykern_sched_switch register succeeded!\n");
    tracepoint_probe_register(mykern_sched_switch.ptr, mykern_sched_switch.callback, NULL);
    mykern_sched_switch.bregister = true;

    mykern_sched_stat_runtime.callback = cb_sched_stat_runtime;
    for_each_kernel_tracepoint(sched_stat_runtime_tracepoint_check_and_set, &mykern_sched_stat_runtime);
    if (!mykern_sched_stat_runtime.ptr) {
        printk(KERN_ERR "mykern_sched_stat_runtime register failed!\n");
        /* Do not leave the sched_switch probe registered if init fails here. */
        clear_kern_tracepoint(&mykern_sched_switch);
        tracepoint_synchronize_unregister();
        return -ENOENT;
    }
    printk(KERN_INFO "mykern_sched_stat_runtime register succeeded!\n");
    tracepoint_probe_register(mykern_sched_stat_runtime.ptr, mykern_sched_stat_runtime.callback, NULL);
    mykern_sched_stat_runtime.bregister = true;

#ifdef SCHED_DELAY_REGISTER
    mykern_sched_delay.callback = cb_sched_delay;
    for_each_kernel_tracepoint(sched_delay_tracepoint_check_and_set, &mykern_sched_delay);
    if (!mykern_sched_delay.ptr) {
        HOS_KERNEL_KLOG_ERR("mykern_sched_delay register failed!\n");
        return 0;
    }
    HOS_KERNEL_KLOG_INFO("mykern_sched_delay register succeeded!\n");
    tracepoint_probe_register(mykern_sched_delay.ptr, mykern_sched_delay.callback, NULL);
    mykern_sched_delay.bregister = true;
#endif
    return 0;
}
static void __exit testscheddelay_exit(void)
{
    clear_kern_tracepoint(&mykern_sched_switch);
    clear_kern_tracepoint(&mykern_sched_stat_runtime);
#ifdef SCHED_DELAY_REGISTER
    clear_kern_tracepoint(&mykern_sched_delay);
#endif
    tracepoint_synchronize_unregister();
    printk(KERN_INFO "testscheddelay: Module unloaded.\n");
}
module_init(testscheddelay_init);
module_exit(testscheddelay_exit);
The figure below shows the dmesg output after insmod-ing a ko built with #define SCHED_DELAY_REGISTER enabled, so the two delay records can be compared. After loading the module I started several busy-loop programs, all pinned to core 10:
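The load generator itself is not shown in the original post; a trivial busy-loop like the following, pinned with taskset (my example invocation, not the author's exact commands), reproduces that setup:

/* spin.c - trivial busy loop used as load (not from the original post).
 * Build and pin a few copies to core 10, e.g.:
 *   gcc -O2 -o spin spin.c
 *   taskset -c 10 ./spin & taskset -c 10 ./spin & taskset -c 10 ./spin &
 * With more runnable spinners than the core can serve, sched_stat_runtime
 * events on CPU 10 will report per-instance delays above the 1 ms threshold. */
int main(void)
{
    volatile unsigned long x = 0;

    for (;;)
        x++;  /* never sleeps, so it competes for CPU 10 constantly */
    return 0;
}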