版权声明:转载请说明,谢谢。 https://blog.csdn.net/wuming_422103632/article/details/81905561
计算过程如下:
/*
 * Check whether @cpu is currently over-utilized: no waking task is being
 * considered and no extra utilization delta is added.
 */
static bool cpu_overutilized(int cpu)
{
	bool overutilized;

	overutilized = __cpu_overutilized(cpu, 0, NULL);
	return overutilized;
}
/*
 * Decide whether @cpu is over-utilized.
 *
 * @cpu:   CPU being evaluated.
 * @delta: extra utilization to add on top of cpu_util(cpu) (0 when just
 *         checking the current state).
 * @p:     optional task being placed (NULL when none).
 *
 * Returns true when the CPU's remaining capacity cannot absorb its current
 * utilization plus @delta with the capacity_margin headroom applied.
 */
static bool __cpu_overutilized(int cpu, int delta, struct task_struct *p)
{
	struct rq *rq = cpu_rq(cpu);
	/* Both values below are updated in update_cpu_capacity(). */
	unsigned long capacity_orig = capacity_orig_of(cpu);
	unsigned long max_capacity = rq->rd->max_cpu_capacity.val;
	/*
	 * capacity_orig == max_capacity means this is a biggest-capacity
	 * ("big") CPU; such a CPU with at most one runnable task is never
	 * treated as over-utilized.
	 */
	if (capacity_orig == max_capacity) {
		/*
		 * Waking task on idle big cpu or
		 * less than one task running on big cpu
		 */
		if (p && ((p->state == TASK_WAKING &&
			   idle_cpu(cpu)) ||
			  (p->state != TASK_WAKING &&
			   rq->nr_running <= 1)))
			return false;
		if (!p && rq->nr_running <= 1)
			return false;
	}
	/*
	 * Over-utilized when (util + delta) exceeds capacity_of(cpu) scaled
	 * down by capacity_margin/1024 (margin ~= 1.1x headroom).
	 */
	return (capacity_of(cpu) * 1024) < ((cpu_util(cpu) + delta) * capacity_margin);
}
具体获取的函数如下:
//初始化root_domain 成员变量 struct max_cpu_capacity
/*
 * Initialize the root_domain's max_cpu_capacity tracker: no CPU has
 * reported a capacity yet, so the cached maximum is zero and the owning
 * CPU id is invalid (-1).
 */
void init_max_cpu_capacity(struct max_cpu_capacity *mcc)
{
	mcc->cpu = -1;
	mcc->val = 0;
	raw_spin_lock_init(&mcc->lock);
}
/*update rq 成员变量cpu_capacity_orig和root_domain 里面max_cpu_capacity成
员变量数值*/
/*
 * Refresh capacity bookkeeping for @cpu:
 *  - rq->cpu_capacity_orig: architecture-scaled (DT-provided) capacity,
 *  - rd->max_cpu_capacity:  system-wide maximum capacity and its CPU,
 *  - rq->cpu_capacity and the sched_group_capacity fields: the original
 *    capacity with RT/IRQ pressure removed.
 */
static void update_cpu_capacity(struct sched_domain *sd, int cpu)
{
	/*
	 * With CONFIG_64BIT_ONLY_CPU=n this resolves to scale_cpu_capacity(),
	 * i.e. the usual per-CPU architectural capacity scaling.
	 */
	unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
	struct sched_group *sdg = sd->groups;
	struct max_cpu_capacity *mcc;
	unsigned long max_capacity;
	int max_cap_cpu;
	unsigned long flags;
	/* Raw capacity of this CPU, as given by DT (capacity-dmips-mhz). */
	cpu_rq(cpu)->cpu_capacity_orig = capacity;
	mcc = &cpu_rq(cpu)->rd->max_cpu_capacity;
	raw_spin_lock_irqsave(&mcc->lock, flags);
	max_capacity = mcc->val;
	max_cap_cpu = mcc->cpu;
	/*
	 * Update when this CPU was the recorded max but its capacity dropped,
	 * or when it now exceeds the recorded maximum.
	 */
	if ((max_capacity > capacity && max_cap_cpu == cpu) ||
	    (max_capacity < capacity)) {
		/*
		 * Record the new topology-wide maximum capacity and its CPU
		 * in the root_domain's max_cpu_capacity.
		 */
		mcc->val = capacity;
		mcc->cpu = cpu;
#ifdef CONFIG_SCHED_DEBUG
		/* Unlock before printing, then skip the common unlock below. */
		raw_spin_unlock_irqrestore(&mcc->lock, flags);
		printk_deferred(KERN_INFO "CPU%d: update max cpu_capacity %lu\n",
				cpu, capacity);
		goto skip_unlock;
#endif
	}
	raw_spin_unlock_irqrestore(&mcc->lock, flags);
skip_unlock: __attribute__ ((unused));
	/* Remove RT/IRQ pressure: scale_rt_capacity() returns a fraction. */
	capacity *= scale_rt_capacity(cpu);
	capacity >>= SCHED_CAPACITY_SHIFT;
	if (!capacity)
		capacity = 1;
	/*
	 * Publish the pressure-adjusted capacity to the rq and to this
	 * sched_group's capacity fields.
	 */
	cpu_rq(cpu)->cpu_capacity = capacity;
	sdg->sgc->capacity = capacity;
	sdg->sgc->max_capacity = capacity;
	sdg->sgc->min_capacity = capacity;
}
从上面可以知道,如果capacity_orig == max_capacity成立,则接下来判断若干个条件是否满足,以确定cpu是否过载:
- task 真实存在 & task状态是TASK_WAKING,正在被waking过程 & cpu是处在idle状态
- task状态不是TASK_WAKING & rq的nr_running数量 <= 1
- 没有task存在 & rq的nr_running数量 <= 1
只要满足上面三个条件之一,就直接返回false,cpu不存在过载情况。
判断上面的三个条件之后,就开始根据cpu负载情况来真实的计算cpu是否over utilization了。
(capacity_of(cpu) * 1024) < ((cpu_util(cpu) + delta) * capacity_margin);
对于cpu的util的计算在下面文章中讲解了:https://blog.csdn.net/wukongmingjing/article/details/81739394
cpu_util(cpu)数值在[0,1024]这范围之内
capacity_margin是一个常量:1138,(约等于1024*1.1)
delta=0
capacity_of(cpu)数值是一个变量,<= capacity_orig,区分big/little架构不同,比如在我现在的手机上little core的capacity_orig=782,big core的capacity_orig=1024
最后计算他们的数值就可以判断是否过载了。打印的信息如下:
cpu=0,capacity_orig=782,max_capacity=1024,capacity_of=735,util=782,overutil=1
cpu=1,capacity_orig=782,max_capacity=1024,capacity_of=745,util=720,overutil=1
cpu=2,capacity_orig=782,max_capacity=1024,capacity_of=730,util=537,overutil=0
cpu=3,capacity_orig=782,max_capacity=1024,capacity_of=737,util=10,overutil=0
cpu=4,capacity_orig=1024,max_capacity=1024,capacity_of=1022,util=0,overutil=0
cpu=5,capacity_orig=1024,max_capacity=1024,capacity_of=1023,util=0,overutil=0
cpu=6,capacity_orig=1024,max_capacity=1024,capacity_of=1022,util=0,overutil=0
cpu=7,capacity_orig=1024,max_capacity=1024,capacity_of=1024,util=0,overutil=0
计算原理比较简单,但是存在一个遗留的action:
capacity_of(cpu)是会变化的,目前还没有找到是在哪里修改的,但是知道这个涉及到domain/group的load balance。