Continuing with the analysis of balance_tasks(): with the comments in the code, the function itself should be easy to follow, so here we focus on its two important helpers, can_migrate_task() and pull_task().

Let's start with can_migrate_task(). This function decides whether the given task may be migrated to the target CPU. The code is as follows:

static
int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
                     struct sched_domain *sd, enum cpu_idle_type idle,
                     int *all_pinned)
{
        /*
         * We do not migrate tasks that are:
         * 1) running (obviously), or
         * 2) cannot be migrated to this CPU due to cpus_allowed, or
         * 3) are cache-hot on their current CPU.
         */
        /* The task is not allowed to run on this_cpu: it cannot be migrated. */
        if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
                schedstat_inc(p, se.nr_failed_migrations_affine);
                return 0;
        }
        *all_pinned = 0;

        /* The task is currently running: it cannot be migrated. */
        if (task_running(rq, p)) {
                schedstat_inc(p, se.nr_failed_migrations_running);
                return 0;
        }

        /*
         * Aggressive migration if:
         * 1) task is cache cold, or
         * 2) too many balance attempts have failed.
         */
        /*
         * The task's cache is cold, or load balancing in this domain has
         * already failed too many times: migration is allowed.
         */
        if (!task_hot(p, rq->clock, sd) ||
                        sd->nr_balance_failed > sd->cache_nice_tries) {
#ifdef CONFIG_SCHEDSTATS
                if (task_hot(p, rq->clock, sd)) {
                        schedstat_inc(sd, lb_hot_gained[idle]);
                        schedstat_inc(p, se.nr_forced_migrations);
                }
#endif
                return 1;
        }

        /* The task's cache is hot: it cannot be migrated. */
        if (task_hot(p, rq->clock, sd)) {
                schedstat_inc(p, se.nr_failed_migrations_hot);
                return 0;
        }
        return 1;
}

Note in particular that if the task is not allowed to run on the target CPU, *all_pinned is not updated; it is only set to 0 once that first affinity check has passed. Also note that task_hot() is called twice with the same arguments in this function, which is an obvious candidate for optimization.

Whether a task's cache is hot is decided in task_hot(), whose code is as follows:

static int
task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
{
        s64 delta;

        /*
         * Buddy candidates are cache hot:
         */
        /*
         * If the task is pointed to by its cfs_rq's next or last buddy, it
         * is a task that will be scheduled preferentially, so its cache is
         * considered hot.
         */
        if (sched_feat(CACHE_HOT_BUDDY) &&
                        (&p->se == cfs_rq_of(&p->se)->next ||
                         &p->se == cfs_rq_of(&p->se)->last))
                return 1;

        /* Not in the CFS scheduling class: the cache is considered cold. */
        if (p->sched_class != &fair_sched_class)
                return 0;

        /*
         * If sysctl_sched_migration_cost is -1, the task's cache is always
         * considered hot; if it is 0, the cache is always considered cold.
         */
        if (sysctl_sched_migration_cost == -1)
                return 1;
        if (sysctl_sched_migration_cost == 0)
                return 0;

        delta = now - p->se.exec_start;

        /*
         * If the interval from the time the task started executing up to now
         * is smaller than sysctl_sched_migration_cost, the cache is hot.
         */
        return delta < (s64)sysctl_sched_migration_cost;
}

We will not analyze this in more detail; the comments already make the logic clear.
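As a side note, the p->cpus_allowed mask tested in the first check of can_migrate_task() is exactly what userspace installs through sched_setaffinity(2), which inside the kernel ends up in set_cpus_allowed_ptr(), discussed in section 4.2 below. A minimal, self-contained userspace sketch follows; pinning the calling task to CPU 0 is just an arbitrary example:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        cpu_set_t mask;

        /* Restrict the calling task to CPU 0 only. */
        CPU_ZERO(&mask);
        CPU_SET(0, &mask);

        /* pid 0 means "the calling task". */
        if (sched_setaffinity(0, sizeof(mask), &mask) < 0) {
                perror("sched_setaffinity");
                return EXIT_FAILURE;
        }

        /*
         * From now on cpumask_test_cpu(this_cpu, &p->cpus_allowed) fails in
         * can_migrate_task() for every this_cpu other than 0, so the load
         * balancer can never pull this task to another CPU.
         */
        printf("pid %d is now pinned to CPU 0\n", getpid());
        return 0;
}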
pull_task() performs the actual migration of a task. The code is as follows:

static void pull_task(struct rq *src_rq, struct task_struct *p,
                      struct rq *this_rq, int this_cpu)
{
        /* Dequeue from the old CPU. */
        deactivate_task(src_rq, p, 0);
        /* Update the task's CPU field. */
        set_task_cpu(p, this_cpu);
        /* Enqueue on the target CPU. */
        activate_task(this_rq, p, 0);
        /*
         * Note that idle threads have a prio of MAX_PRIO, for this test
         * to be always true for them.
         */
        /* Check whether preemption is needed on the target CPU. */
        check_preempt_curr(this_rq, p, 0);
}

Because a task has just been added to the target CPU's runqueue, we must check whether the task currently running on the target CPU should be preempted.

3.2: Load balancing when a CPU is idle

When a CPU goes idle it also actively performs load balancing, as the following code fragment shows:

asmlinkage void __sched schedule(void)
{
        ...
        if (unlikely(!rq->nr_running))
                idle_balance(cpu, rq);
        ...
}

In schedule(), if the runqueue is empty, idle_balance() is called. We will not walk through idle_balance() again here; the key points of the CPU_NEWLY_IDLE type of load balancing were already pointed out in the earlier analysis. The main differences between CPU_NEWLY_IDLE and the other types of load balancing are:

1: CPU_NEWLY_IDLE balancing runs as soon as a CPU is found to be idle, with no balancing interval; furthermore, its handling sets the timestamp for the next tick-driven balance to a smaller value, so the imbalance is noticed more quickly in the tick interrupt.

2: A CPU_NEWLY_IDLE balance moves only a small number of tasks; it only needs to make sure the CPU has something to run.

3: CPU_NEWLY_IDLE "pulls" tasks from other CPUs onto the local CPU.

4: The migration thread

We also saw in load_balance() that when the failure count exceeds sd->cache_nice_tries+2, the CPU's migration thread is woken up. Let's look at how that thread works, starting with the following code:

static int __init migration_init(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err;

        /* Start one for the boot CPU: */
        err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
        BUG_ON(err == NOTIFY_BAD);
        migration_call(&migration_notifier, CPU_ONLINE, cpu);
        register_cpu_notifier(&migration_notifier);

        return err;
}
early_initcall(migration_init);

migration_init() is called during system initialization and registers a CPU notifier chain, so it can catch CPU hotplug events. In the notifier's handler, as the following code fragment shows:

static int __cpuinit
migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        ...
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
                if (IS_ERR(p))
                        return NOTIFY_BAD;
                kthread_bind(p, cpu);
                /* Must be high prio: stop_machine expects to yield to it. */
                rq = task_rq_lock(p, &flags);
                __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
                task_rq_unlock(rq, &flags);
                cpu_rq(cpu)->migration_thread = p;
                break;
        ...

From this we can see that whenever a CPU comes up, a migration thread is created for it and bound to it, and it is made a SCHED_FIFO real-time task with a high priority. The thread function is migration_thread(), whose code is as follows:

static int migration_thread(void *data)
{
        int cpu = (long)data;
        struct rq *rq;

        rq = cpu_rq(cpu);
        BUG_ON(rq->migration_thread != current);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                struct migration_req *req;
                struct list_head *head;

                spin_lock_irq(&rq->lock);

                /* The CPU has gone offline: jump to wait_to_die and wait to exit. */
                if (cpu_is_offline(cpu)) {
                        spin_unlock_irq(&rq->lock);
                        goto wait_to_die;
                }

                /*
                 * active_balance == 1 means load balancing has been failing
                 * on this CPU (see load_balance()).
                 */
                if (rq->active_balance) {
                        active_load_balance(rq, cpu);
                        rq->active_balance = 0;
                }

                head = &rq->migration_queue;

                /* If rq->migration_queue is empty, sleep until woken up. */
                if (list_empty(head)) {
                        spin_unlock_irq(&rq->lock);
                        schedule();
                        set_current_state(TASK_INTERRUPTIBLE);
                        continue;
                }

                /*
                 * Take a request off migration_queue and migrate the task.
                 * Such requests are normally queued from execve() or when a
                 * task's allowed CPU set is changed.
                 */
                req = list_entry(head->next, struct migration_req, list);
                list_del_init(head->next);

                spin_unlock(&rq->lock);
                __migrate_task(req->task, cpu, req->dest_cpu);
                local_irq_enable();

                /* Done: wake up the task that is waiting for this request. */
                complete(&req->done);
        }
        __set_current_state(TASK_RUNNING);
        return 0;

wait_to_die:
        /* Wait for kthread_stop */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}
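The thread follows the classic kthread pattern: mark itself TASK_INTERRUPTIBLE, sleep in schedule() until a submitter queues a request and calls wake_up_process(), then report back through a struct completion. The following is a minimal, self-contained kernel-module sketch of the same handshake; the demo_* names are made up for illustration and this is not the scheduler's own code:

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/* A made-up request type playing the role of struct migration_req. */
struct demo_req {
        struct list_head list;
        struct completion done;
};

static LIST_HEAD(demo_queue);
static DEFINE_SPINLOCK(demo_lock);
static struct task_struct *demo_task;

/*
 * Worker: same shape as migration_thread() - sleep until a request is
 * queued, handle it, then signal the submitter through the completion.
 */
static int demo_thread(void *unused)
{
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                struct demo_req *req;

                spin_lock_irq(&demo_lock);
                if (list_empty(&demo_queue)) {
                        spin_unlock_irq(&demo_lock);
                        schedule();                     /* wait for wake_up_process() */
                        set_current_state(TASK_INTERRUPTIBLE);
                        continue;
                }
                req = list_first_entry(&demo_queue, struct demo_req, list);
                list_del_init(&req->list);
                spin_unlock_irq(&demo_lock);

                /* ... the real work would happen here ... */
                complete(&req->done);                   /* unblock the submitter */
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

/* Submitter: same shape as set_cpus_allowed_ptr()/sched_migrate_task(). */
static void demo_submit(void)
{
        struct demo_req req;

        init_completion(&req.done);
        spin_lock_irq(&demo_lock);
        list_add_tail(&req.list, &demo_queue);
        spin_unlock_irq(&demo_lock);

        wake_up_process(demo_task);                     /* kick the worker */
        wait_for_completion(&req.done);                 /* block until it is done */
}

static int __init demo_init(void)
{
        demo_task = kthread_run(demo_thread, NULL, "demo_thread");
        if (IS_ERR(demo_task))
                return PTR_ERR(demo_task);

        demo_submit();                                  /* exercise the handshake once */
        return 0;
}

static void __exit demo_exit(void)
{
        kthread_stop(demo_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The ordering matters: the submitter always enqueues before waking, and the worker always rechecks the queue after schedule() returns, so no wakeup can be lost.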
4.1: active_load_balance()

Let's look at the first of those two operations, active_load_balance(). This function handles the case where ordinary load balancing keeps failing (detected in load_balance()). The code is as follows:

static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
{
        int target_cpu = busiest_rq->push_cpu;
        struct sched_domain *sd;
        struct rq *target_rq;

        /* Is there any task to move? */
        /*
         * If the busy runqueue has only one runnable task left, there is
         * nothing to balance.
         */
        if (busiest_rq->nr_running <= 1)
                return;

        target_rq = cpu_rq(target_cpu);

        /*
         * This condition is "impossible", if it occurs
         * we need to fix it. Originally reported by
         * Bjorn Helgaas on a 128-cpu setup.
         */
        /*
         * The busiest runqueue cannot be the local one, because load
         * balancing always picks the busiest group and busiest queue
         * among the remote ones.
         */
        BUG_ON(busiest_rq == target_rq);

        /* move a task from busiest_rq to target_rq */
        double_lock_balance(busiest_rq, target_rq);
        update_rq_clock(busiest_rq);
        update_rq_clock(target_rq);

        /* Search for an sd spanning us and the target CPU. */
        /*
         * Find the domain of the target CPU that also contains busiest_cpu.
         * On a plain SMP machine there is only one base scheduling domain.
         */
        for_each_domain(target_cpu, sd) {
                if ((sd->flags & SD_LOAD_BALANCE) &&
                    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
                                break;
        }

        /* A domain that needs balancing was found. */
        if (likely(sd)) {
                schedstat_inc(sd, alb_count);

                /* Move one task from the busy runqueue to the target CPU. */
                if (move_one_task(target_rq, target_cpu, busiest_rq,
                                  sd, CPU_IDLE))
                        schedstat_inc(sd, alb_pushed);
                else
                        schedstat_inc(sd, alb_failed);
        }
        double_unlock_balance(busiest_rq, target_rq);
}

From this we can see that when regular load balancing has failed, only a single task is moved from the busy runqueue to the target CPU.
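The alb_count/alb_pushed/alb_failed counters incremented above are exported through /proc/schedstat when CONFIG_SCHEDSTATS is enabled. A small userspace sketch that simply dumps the per-domain lines is shown below; the exact field positions depend on the schedstat version reported in the first line, so no attempt is made to parse them here:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[1024];
        FILE *fp = fopen("/proc/schedstat", "r");

        if (!fp) {
                perror("/proc/schedstat");      /* CONFIG_SCHEDSTATS may be off */
                return 1;
        }
        while (fgets(line, sizeof(line), fp)) {
                /*
                 * "domainN ..." lines carry the per-sched-domain counters,
                 * among them the active-load-balance alb_* statistics.
                 */
                if (!strncmp(line, "version", 7) || !strncmp(line, "domain", 6))
                        fputs(line, stdout);
        }
        fclose(fp);
        return 0;
}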
Now let's look at the actual move, move_one_task(), which is called here with the CPU_IDLE idle type. The code is as follows:

static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
                         struct sched_domain *sd, enum cpu_idle_type idle)
{
        const struct sched_class *class;

        for (class = sched_class_highest; class; class = class->next)
                if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
                        return 1;

        return 0;
}

So it simply calls each scheduling class's move_one_task() in priority order. For CFS this is move_one_task_fair(), whose code is as follows:

static int
move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                   struct sched_domain *sd, enum cpu_idle_type idle)
{
        struct cfs_rq *busy_cfs_rq;
        struct rq_iterator cfs_rq_iterator;

        cfs_rq_iterator.start = load_balance_start_fair;
        cfs_rq_iterator.next = load_balance_next_fair;

        for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
                /*
                 * pass busy_cfs_rq argument into
                 * load_balance_[start|next]_fair iterators
                 */
                cfs_rq_iterator.arg = busy_cfs_rq;
                if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
                                       &cfs_rq_iterator))
                        return 1;
        }

        return 0;
}

As we saw when analyzing CFS group scheduling, every task group on a CPU is linked on that CPU runqueue's leaf_cfs_rq_list, so walking that list walks all the task groups on the CPU. The iterator used here was analyzed earlier, so we will not repeat it; the flow continues into iter_move_one_task():

static int
iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
                   struct sched_domain *sd, enum cpu_idle_type idle,
                   struct rq_iterator *iterator)
{
        struct task_struct *p = iterator->start(iterator->arg);
        int pinned = 0;

        while (p) {
                if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
                        pull_task(busiest, p, this_rq, this_cpu);
                        /*
                         * Right now, this is only the second place pull_task()
                         * is called, so we can safely collect pull_task()
                         * stats here rather than inside pull_task().
                         */
                        schedstat_inc(sd, lb_gained[idle]);

                        return 1;
                }
                p = iterator->next(iterator->arg);
        }

        return 0;
}

As soon as a task is found that may be moved to the target CPU, pull_task() moves it there and the function returns immediately. All the helpers involved here were analyzed above, so we will not go through them again.
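The rq_iterator used above is nothing more than an opaque arg plus start/next callbacks, which lets the generic balancing code walk any container the scheduling class chooses. A minimal userspace analogue of the same pattern follows; the names (struct iter, move_one, and so on) are made up for illustration:

#include <stdio.h>

/* Same shape as struct rq_iterator: an opaque arg plus start/next callbacks. */
struct iter {
        void *arg;
        int *(*start)(void *arg);
        int *(*next)(void *arg);
};

/* A trivial "container": an array walked by the callbacks below. */
struct int_array {
        int *data;
        int len;
        int pos;
};

static int *array_start(void *arg)
{
        struct int_array *a = arg;

        a->pos = 0;
        return a->len ? &a->data[0] : NULL;
}

static int *array_next(void *arg)
{
        struct int_array *a = arg;

        if (++a->pos >= a->len)
                return NULL;
        return &a->data[a->pos];
}

/*
 * Mirrors iter_move_one_task(): walk the iterator until one element
 * satisfies the predicate, act on it, and stop.
 */
static int move_one(struct iter *it, int (*can_move)(int))
{
        for (int *p = it->start(it->arg); p; p = it->next(it->arg)) {
                if (can_move(*p)) {
                        printf("moving %d\n", *p);
                        return 1;
                }
        }
        return 0;
}

static int is_even(int v) { return (v & 1) == 0; }

int main(void)
{
        int vals[] = { 3, 5, 8, 11 };
        struct int_array a = { vals, 4, 0 };
        struct iter it = { &a, array_start, array_next };

        return !move_one(&it, is_even);         /* prints "moving 8" */
}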
4.2: rq->migration_queue

Next, let's analyze how the requests linked on rq->migration_queue are handled. First we need to know under what circumstances requests are put on that list. Searching the kernel sources shows it is done in migrate_task(), whose code is as follows:

static int
migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
{
        struct rq *rq = task_rq(p);

        /*
         * If the task is not on a runqueue (and not running), then
         * it is sufficient to simply update the tasks cpu field.
         */
        /*
         * If the task is not runnable there is no need to move it between
         * runqueues; simply pointing it at the target CPU is enough.
         */
        if (!p->se.on_rq && !task_running(rq, p)) {
                set_task_cpu(p, dest_cpu);
                return 0;
        }

        /*
         * Initialize the struct migration_req and link it onto the
         * migration_queue of the CPU the task is currently on.
         */
        init_completion(&req->done);
        req->task = p;
        req->dest_cpu = dest_cpu;
        list_add(&req->list, &rq->migration_queue);

        return 1;
}

This function moves task p to dest_cpu. Searching the kernel sources also shows migrate_task() is called in two situations:

1: When a task's allowed CPU set is changed. In this case migrating the task onto its new CPU set is the natural thing to do, as the following code fragment shows:

int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
        ...
        if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
                /* Need help from migration thread: drop lock and wait. */
                task_rq_unlock(rq, &flags);
                wake_up_process(rq->migration_thread);
                wait_for_completion(&req.done);
                tlb_migrate_finish(p->mm);
                return 0;
        }
        ...
}

Here new_mask is the new CPU set of task p, and cpumask_any_and(cpu_online_mask, new_mask) picks an arbitrary CPU (usually the lowest-numbered one) from the intersection of cpu_online_mask and new_mask. migrate_task() links the request onto migration_queue, then the CPU's migration thread is woken up and the caller waits for the operation to complete.

2: At execve() time, in the path do_execve() -> sched_exec():

void sched_exec(void)
{
        int new_cpu, this_cpu = get_cpu();

        /* Find the least loaded CPU in the same scheduling domain. */
        new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
        put_cpu();

        /* If the current CPU is not the least loaded one, migrate there. */
        if (new_cpu != this_cpu)
                sched_migrate_task(current, new_cpu);
}

Why rebalance at execve() time? Because this is in fact the best moment to move a task: the memory it occupies and the cache footprint it would lose are both at their smallest. sched_balance_self() finds the least loaded CPU in the current CPU's scheduling domain; its logic is similar to find_busiest_group(), which we analyzed earlier, so it is not covered here. The flow continues into sched_migrate_task():

static void sched_migrate_task(struct task_struct *p, int dest_cpu)
{
        struct migration_req req;
        unsigned long flags;
        struct rq *rq;

        rq = task_rq_lock(p, &flags);

        /*
         * The task is not allowed on dest_cpu, or dest_cpu is already
         * offline: bail out.
         */
        if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
            || unlikely(!cpu_active(dest_cpu)))
                goto out;

        /* force the process onto the specified CPU */
        /* Build the request and link it onto the migration queue. */
        if (migrate_task(p, dest_cpu, &req)) {
                /* Need to wait for migration thread (might exit: take ref). */
                struct task_struct *mt = rq->migration_thread;

                get_task_struct(mt);
                task_rq_unlock(rq, &flags);
                wake_up_process(mt);
                put_task_struct(mt);
                wait_for_completion(&req.done);

                return;
        }
out:
        task_rq_unlock(rq, &flags);
}

This is much the same as the handling in set_cpus_allowed_ptr(); please work through it with the comments in the code.

Next, let's see how the migration thread actually handles these requests, in migration_thread() -> __migrate_task():

static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
        struct rq *rq_dest, *rq_src;
        int ret = 0, on_rq;

        if (unlikely(!cpu_active(dest_cpu)))
                return ret;

        rq_src = cpu_rq(src_cpu);
        rq_dest = cpu_rq(dest_cpu);

        double_rq_lock(rq_src, rq_dest);
        /* Already moved. */
        /* The task is no longer on src_cpu: it has probably been moved already. */
        if (task_cpu(p) != src_cpu)
                goto done;
        /* Affinity changed (again). */
        /* The task is not allowed to run on dest_cpu: fail. */
        if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
                goto fail;

        /* Migrate the task to the destination CPU. */
        on_rq = p->se.on_rq;
        if (on_rq)
                deactivate_task(rq_src, p, 0);

        set_task_cpu(p, dest_cpu);
        if (on_rq) {
                activate_task(rq_dest, p, 0);
                check_preempt_curr(rq_dest, p, 0);
        }
done:
        ret = 1;
fail:
        double_rq_unlock(rq_src, rq_dest);
        return ret;
}

The process is straightforward: it is simply the dequeue/retarget/enqueue migration we have already seen, so we will not repeat it here.

5: The scheduling-domain question left over from cpuset

When analyzing the cpuset subsystem we ran into an interface related to scheduling domains, partition_sched_domains(). In this section we analyze it in detail. The code is as follows:

void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
                             struct sched_domain_attr *dattr_new)
{
        int i, j, n;
        int new_topology;

        mutex_lock(&sched_domains_mutex);

        /* always unregister in case we don't destroy any domains */
        unregister_sched_domain_sysctl();

        /* Let architecture update cpu core mappings. */
        new_topology = arch_update_cpu_topology();

        n = doms_new ? ndoms_new : 0;

        /* Destroy deleted domains */
        /*
         * Check whether the current scheduling domains overlap with the
         * requested ones.  Any domain that is identical can be kept and
         * does not have to be set up again.
         */
        for (i = 0; i < ndoms_cur; i++) {
                /* If an identical domain is requested, keep it and move on. */
                for (j = 0; j < n && !new_topology; j++) {
                        if (cpumask_equal(&doms_cur[i], &doms_new[j])
                            && dattrs_equal(dattr_cur, i, dattr_new, j))
                                goto match1;
                }
                /* no match - a current sched domain not in new doms_new[] */
                /* No match: the old domain's information must be released. */
                detach_destroy_domains(doms_cur + i);
match1:
                ;
        }

        /*
         * If doms_new == NULL, then ndoms_new must be 1: take all online
         * CPUs except the isolated ones and put them into a single domain.
         */
        if (doms_new == NULL) {
                ndoms_cur = 0;
                doms_new = fallback_doms;
                cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
                WARN_ON_ONCE(dattr_new);
        }

        /* Build new domains */
        /* Build the new domains; likewise, existing ones are not rebuilt. */
        for (i = 0; i < ndoms_new; i++) {
                for (j = 0; j < ndoms_cur && !new_topology; j++) {
                        if (cpumask_equal(&doms_new[i], &doms_cur[j])
                            && dattrs_equal(dattr_new, i, dattr_cur, j))
                                goto match2;
                }
                /* no match - add a new doms_new */
                __build_sched_domains(doms_new + i,
                                       dattr_new ? dattr_new + i : NULL);
match2:
                ;
        }

        /* Remember the new sched domains */
        /* Release the old resources and update doms_cur, ndoms_cur, etc. */
        if (doms_cur != fallback_doms)
                kfree(doms_cur);
        kfree(dattr_cur);       /* kfree(NULL) is safe */
        doms_cur = doms_new;
        dattr_cur = dattr_new;
        ndoms_cur = ndoms_new;

        register_sched_domain_sysctl();

        mutex_unlock(&sched_domains_mutex);
}

Three arguments are passed in: ndoms_new is the number of scheduling domains; doms_new gives the CPU members of each domain and is a struct cpumask array with ndoms_new entries; dattr_new holds the attributes of each domain. Scheduling-domain attributes were covered in the cpuset analysis, so they are not repeated here. Several globals are involved:

ndoms_cur: the number of scheduling domains currently in the system.
doms_cur: the CPU bitmaps of the current scheduling domains.
dattr_cur: the attributes of the current scheduling domains.

The logic of this interface is quite clear, and its core helper __build_sched_domains() was analyzed in detail earlier, so we will not spend more time on it.
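To make the two-pass diff concrete, here is a minimal userspace sketch that uses plain integers in place of struct cpumask and printf() in place of detach_destroy_domains()/__build_sched_domains(); the mask values are arbitrary and chosen only for illustration:

#include <stdio.h>

/*
 * Plain unsigned ints stand in for struct cpumask here; the point is only
 * the two-pass diff that partition_sched_domains() performs.
 */
static unsigned int doms_cur[] = { 0x0f, 0xf0 };        /* current domains   */
static unsigned int doms_new[] = { 0x0f, 0xc0, 0x30 };  /* requested domains */

#define NDOMS_CUR (sizeof(doms_cur) / sizeof(doms_cur[0]))
#define NDOMS_NEW (sizeof(doms_new) / sizeof(doms_new[0]))

int main(void)
{
        unsigned int i, j;

        /* Pass 1: tear down current domains that have no match in doms_new. */
        for (i = 0; i < NDOMS_CUR; i++) {
                for (j = 0; j < NDOMS_NEW; j++)
                        if (doms_cur[i] == doms_new[j])
                                goto match1;
                printf("destroy domain 0x%02x\n", doms_cur[i]);
match1:
                ;
        }

        /* Pass 2: build requested domains that do not already exist. */
        for (i = 0; i < NDOMS_NEW; i++) {
                for (j = 0; j < NDOMS_CUR; j++)
                        if (doms_new[i] == doms_cur[j])
                                goto match2;
                printf("build domain 0x%02x\n", doms_new[i]);
match2:
                ;
        }
        return 0;
}

With these values, domain 0x0f is present in both sets and is left untouched, 0xf0 is destroyed, and 0xc0 and 0x30 are built.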
6: Summary

Some parts of the SMP load-balancing code are still quite obscure, for example the adjustment of the shares values and of h_load. The computation of task load and the conditions under which balancing is triggered are also hard to grasp; even so, compared with 2.6.9 the logic has become considerably clearer.