Merge branch 'linus' into sched/core

Merge reason: pick up the latest fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Ingo Molnar
2011-10-04 11:08:16 +02:00
398 changed files with 3552 additions and 2725 deletions

kernel/irq/chip.c

@@ -178,7 +178,7 @@ void irq_shutdown(struct irq_desc *desc)
 	desc->depth = 1;
 	if (desc->irq_data.chip->irq_shutdown)
 		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
-	if (desc->irq_data.chip->irq_disable)
+	else if (desc->irq_data.chip->irq_disable)
 		desc->irq_data.chip->irq_disable(&desc->irq_data);
 	else
 		desc->irq_data.chip->irq_mask(&desc->irq_data);
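The fix turns two independent if statements into an if / else if / else cascade, so exactly one teardown callback runs: irq_shutdown() when the chip provides it, otherwise irq_disable(), otherwise irq_mask() as the last resort. A minimal standalone sketch of the same cascade pattern, with hypothetical callback names (not the kernel API):

#include <stdio.h>

/* Hypothetical chip descriptor: optional callbacks, most specific first. */
struct chip_ops {
	void (*shutdown)(void);
	void (*disable)(void);
	void (*mask)(void);	/* assumed always present, like irq_mask */
};

static void do_mask(void) { puts("mask"); }

static void chip_teardown(const struct chip_ops *ops)
{
	/*
	 * Cascade, as in the fixed irq_shutdown(): exactly one callback
	 * fires. Two independent ifs would invoke shutdown AND disable.
	 */
	if (ops->shutdown)
		ops->shutdown();
	else if (ops->disable)
		ops->disable();
	else
		ops->mask();
}

int main(void)
{
	struct chip_ops ops = { .mask = do_mask }; /* only mask provided */
	chip_teardown(&ops);                       /* prints "mask" */
	return 0;
}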

kernel/irq/irqdomain.c

@@ -29,7 +29,11 @@ void irq_domain_add(struct irq_domain *domain)
 	 */
 	for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) {
 		d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
-		if (d || d->domain) {
+		if (!d) {
+			WARN(1, "error: assigning domain to non existant irq_desc");
+			return;
+		}
+		if (d->domain) {
 			/* things are broken; just report, don't clean up */
 			WARN(1, "error: irq_desc already assigned to a domain");
 			return;
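The old test "if (d || d->domain)" short-circuits the wrong way around: the second operand is evaluated exactly when d is NULL, so the check itself dereferences a NULL pointer, and a valid d always takes the error path. The fix splits it into a NULL check and a separate busy-domain check, each with its own diagnostic. An illustrative standalone sketch of the fixed shape (hypothetical types, not kernel code):

#include <stdio.h>
#include <stddef.h>

struct irq_data_s { void *domain; }; /* hypothetical stand-in */

static int check_fixed(struct irq_data_s *d)
{
	/* Fixed shape: the NULL case is handled before any dereference. */
	if (!d) {
		fprintf(stderr, "error: no descriptor\n");
		return -1;
	}
	if (d->domain) {
		fprintf(stderr, "error: already assigned to a domain\n");
		return -1;
	}
	return 0; /* safe to assign */
}

int main(void)
{
	struct irq_data_s d = { .domain = NULL };
	printf("%d\n", check_fixed(&d));   /* 0: assignable */
	printf("%d\n", check_fixed(NULL)); /* -1: caught, no NULL deref */
	return 0;
}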

kernel/posix-cpu-timers.c

@@ -250,7 +250,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 	do {
 		times->utime = cputime_add(times->utime, t->utime);
 		times->stime = cputime_add(times->stime, t->stime);
-		times->sum_exec_runtime += t->se.sum_exec_runtime;
+		times->sum_exec_runtime += task_sched_runtime(t);
 	} while_each_thread(tsk, t);
 out:
 	rcu_read_unlock();
@@ -312,7 +312,8 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
 		cpu->cpu = cputime.utime;
 		break;
 	case CPUCLOCK_SCHED:
-		cpu->sched = thread_group_sched_runtime(p);
+		thread_group_cputime(p, &cputime);
+		cpu->sched = cputime.sum_exec_runtime;
 		break;
 	}
 	return 0;
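Both hunks make the group total come from a single source. t->se.sum_exec_runtime only holds runtime the scheduler has already banked, so summing it raw can lag behind (and even appear to run backwards against) a per-thread clock that includes pending runtime; calling task_sched_runtime() per thread, and deriving CPUCLOCK_SCHED from thread_group_cputime(), keeps the two views consistent. A toy model of "banked plus pending" accumulation, with hypothetical field names:

#include <stdio.h>

/* Hypothetical per-thread clock: banked runtime + delta still on CPU. */
struct thread_clock {
	unsigned long long banked_ns;  /* like se.sum_exec_runtime */
	unsigned long long pending_ns; /* not yet folded in by the scheduler */
};

/* Analogue of task_sched_runtime(): banked plus pending. */
static unsigned long long thread_runtime(const struct thread_clock *t)
{
	return t->banked_ns + t->pending_ns;
}

int main(void)
{
	struct thread_clock group[2] = {
		{ .banked_ns = 900, .pending_ns = 100 }, /* running thread */
		{ .banked_ns = 500, .pending_ns = 0 },
	};
	unsigned long long stale = 0, consistent = 0;

	for (int i = 0; i < 2; i++) {
		stale += group[i].banked_ns;             /* old behaviour */
		consistent += thread_runtime(&group[i]); /* fixed behaviour */
	}
	/* 1400 vs 1500: the stale sum lags the per-thread clocks. */
	printf("stale=%llu consistent=%llu\n", stale, consistent);
	return 0;
}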

kernel/ptrace.c

@@ -744,20 +744,17 @@ int ptrace_request(struct task_struct *child, long request,
 		break;
 
 		si = child->last_siginfo;
-		if (unlikely(!si || si->si_code >> 8 != PTRACE_EVENT_STOP))
-			break;
-
-		child->jobctl |= JOBCTL_LISTENING;
-
-		/*
-		 * If NOTIFY is set, it means event happened between start
-		 * of this trap and now. Trigger re-trap immediately.
-		 */
-		if (child->jobctl & JOBCTL_TRAP_NOTIFY)
-			signal_wake_up(child, true);
-
+		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
+			child->jobctl |= JOBCTL_LISTENING;
+			/*
+			 * If NOTIFY is set, it means event happened between
+			 * start of this trap and now. Trigger re-trap.
+			 */
+			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
+				signal_wake_up(child, true);
+			ret = 0;
+		}
 		unlock_task_sighand(child, &flags);
-		ret = 0;
 		break;
 
 	case PTRACE_DETACH:	 /* detach a process that was attached. */
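The bug here was the early "break" on the failure path, which returned with ->siglock still held because unlock_task_sighand() was skipped. Inverting the test nests the success path, so both outcomes fall through to the unlock, and ret = 0 moves inside the success branch. A minimal pthread sketch of the "one unlock for every path" shape (hypothetical names):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int state_ok = 1; /* stand-in for the validity check on si */

static int do_listen(void)
{
	int ret = -1; /* assume failure, like -EINVAL */

	pthread_mutex_lock(&lock);
	if (state_ok) {	/* inverted test: success path nested */
		/* ... state changes done under the lock ... */
		ret = 0;
	}
	pthread_mutex_unlock(&lock); /* reached on success AND failure */
	return ret;
}

int main(void)
{
	printf("ret=%d\n", do_listen());
	state_ok = 0;
	printf("ret=%d\n", do_listen()); /* fails, but still unlocks */
	return 0;
}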

kernel/resource.c

@@ -419,6 +419,9 @@ static int __find_resource(struct resource *root, struct resource *old,
 		else
 			tmp.end = root->end;
 
+		if (tmp.end < tmp.start)
+			goto next;
+
 		resource_clip(&tmp, constraint->min, constraint->max);
 		arch_remove_reservations(&tmp);
@@ -436,8 +439,10 @@ static int __find_resource(struct resource *root, struct resource *old,
 			return 0;
 		}
 	}
-	if (!this)
+
+next:	if (!this || this->end == root->end)
 		break;
+
 	if (this != old)
 		tmp.start = this->end + 1;
 	this = this->sibling;
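The first hunk rejects a candidate window whose end precedes its start before clipping and testing it; the second makes the "next" label the landing point for that skip and also stops scanning once a sibling already reaches root->end, since no gap can follow it. A standalone sketch of scanning the gaps between sorted allocations inside a parent range, skipping empty windows the same way (hypothetical data, not the kernel allocator):

#include <stdio.h>

struct range { unsigned long start, end; }; /* inclusive bounds */

int main(void)
{
	struct range root = { 0, 99 };
	/* Hypothetical sorted allocations inside root. */
	struct range used[] = { { 0, 19 }, { 20, 59 }, { 90, 99 } };
	int n = 3;
	unsigned long start = root.start;

	for (int i = 0; i <= n; i++) {
		unsigned long end = (i < n) ? used[i].start - 1 : root.end;

		/* Like the fix: an inverted window means "no gap here". */
		if (end < start || end > root.end)
			goto next;
		printf("candidate gap: [%lu, %lu]\n", start, end);
next:
		if (i == n || used[i].end == root.end)
			break; /* nothing can follow the last sibling */
		start = used[i].end + 1;
	}
	return 0; /* prints only the real gap, [60, 89] */
}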

kernel/sched.c

@@ -3865,30 +3865,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 	return ns;
 }
 
-/*
- * Return sum_exec_runtime for the thread group.
- * In case the task is currently running, return the sum plus current's
- * pending runtime that have not been accounted yet.
- *
- * Note that the thread group might have other running tasks as well,
- * so the return value not includes other pending runtime that other
- * running tasks might have.
- */
-unsigned long long thread_group_sched_runtime(struct task_struct *p)
-{
-	struct task_cputime totals;
-	unsigned long flags;
-	struct rq *rq;
-	u64 ns;
-
-	rq = task_rq_lock(p, &flags);
-	thread_group_cputime(p, &totals);
-	ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
-	task_rq_unlock(rq, p, &flags);
-
-	return ns;
-}
-
 /*
  * Account user cpu time to a process.
  * @p:		the process that the cpu time gets accounted to
@@ -4513,7 +4489,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
 		blk_schedule_flush_plug(tsk);
 }
 
-asmlinkage void schedule(void)
+asmlinkage void __sched schedule(void)
 {
 	struct task_struct *tsk = current;
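thread_group_sched_runtime() loses its last caller in the posix-cpu-timers change above, so it is removed here. The second hunk tags schedule() with __sched, which in the kernel expands to a section attribute that places the function in .sched.text, so wchan/backtrace logic can recognise and skip scheduler internals. A compilable sketch of the same mechanism, using a made-up section name:

#include <stdio.h>

/*
 * In the kernel, __sched expands to
 *   __attribute__((__section__(".sched.text")))
 * so scheduler entry points land in one section that the wchan
 * logic can test against. Same mechanism, toy section name:
 */
#define __sched_demo __attribute__((__section__(".demo.sched.text")))

static void __sched_demo demo_schedule(void)
{
	puts("inside demo_schedule()");
}

int main(void)
{
	demo_schedule();
	return 0;
}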

kernel/sched_rt.c

@@ -1031,7 +1031,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 	 */
 	if (curr && unlikely(rt_task(curr)) &&
 	    (curr->rt.nr_cpus_allowed < 2 ||
-	     curr->prio < p->prio) &&
+	     curr->prio <= p->prio) &&
 	    (p->rt.nr_cpus_allowed > 1)) {
 		int target = find_lowest_rq(p);
@@ -1563,7 +1563,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 	    p->rt.nr_cpus_allowed > 1 &&
 	    rt_task(rq->curr) &&
 	    (rq->curr->rt.nr_cpus_allowed < 2 ||
-	     rq->curr->prio < p->prio))
+	     rq->curr->prio <= p->prio))
 		push_rt_tasks(rq);
 }
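Kernel priorities are inverted: a numerically lower prio is a higher priority. With "<", a waking RT task of equal priority stayed queued behind the current task even when another CPU could have taken it; "<=" pushes the equal-priority case too, since a tie does not preempt. A tiny sketch of the comparison's intent (hypothetical helper, kernel prio convention assumed):

#include <stdio.h>
#include <stdbool.h>

/* Kernel convention: numerically lower prio == higher priority. */
static bool should_push(int curr_prio, int waking_prio)
{
	/*
	 * Fixed test: push the waking task to another CPU when the
	 * current task's priority is higher than OR EQUAL to it; an
	 * equal-priority task cannot preempt and would just wait.
	 */
	return curr_prio <= waking_prio;
}

int main(void)
{
	printf("curr=10 waking=10 -> push=%d\n", should_push(10, 10)); /* 1 */
	printf("curr=10 waking=5  -> push=%d\n", should_push(10, 5));  /* 0: waking preempts */
	return 0;
}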

kernel/taskstats.c

@@ -655,6 +655,7 @@ static struct genl_ops taskstats_ops = {
 	.cmd		= TASKSTATS_CMD_GET,
 	.doit		= taskstats_user_cmd,
 	.policy		= taskstats_cmd_get_policy,
+	.flags		= GENL_ADMIN_PERM,
 };
 
 static struct genl_ops cgroupstats_ops = {
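Adding GENL_ADMIN_PERM makes the generic-netlink core reject TASKSTATS_CMD_GET from senders lacking CAP_NET_ADMIN before the handler ever runs, so unprivileged users can no longer harvest per-task accounting data; cgroupstats_ops below stays unrestricted. A rough standalone analogue of a flag-gated command table (hypothetical names, not the genetlink API):

#include <stdio.h>

#define ADMIN_PERM 0x01 /* analogue of GENL_ADMIN_PERM */

/* Hypothetical command table in the style of struct genl_ops. */
struct cmd_op {
	const char *name;
	unsigned int flags;
};

static int dispatch(const struct cmd_op *op, int sender_is_admin)
{
	/* Analogue of the check genetlink performs before ->doit() runs. */
	if ((op->flags & ADMIN_PERM) && !sender_is_admin) {
		fprintf(stderr, "%s: permission denied\n", op->name);
		return -1;
	}
	printf("%s: handled\n", op->name);
	return 0;
}

int main(void)
{
	struct cmd_op taskstats_get = { "TASKSTATS_CMD_GET", ADMIN_PERM };
	dispatch(&taskstats_get, 0); /* unprivileged: now rejected */
	dispatch(&taskstats_get, 1); /* admin: allowed */
	return 0;
}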

kernel/tsacct.c

@@ -78,6 +78,7 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
 #define KB 1024
 #define MB (1024*KB)
+#define KB_MASK (~(KB-1))
 /*
  * fill in extended accounting fields
  */
@@ -95,14 +96,14 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
 		stats->hiwater_vm = get_mm_hiwater_vm(mm) * PAGE_SIZE / KB;
 		mmput(mm);
 	}
-	stats->read_char	= p->ioac.rchar;
-	stats->write_char	= p->ioac.wchar;
-	stats->read_syscalls	= p->ioac.syscr;
-	stats->write_syscalls	= p->ioac.syscw;
+	stats->read_char	= p->ioac.rchar & KB_MASK;
+	stats->write_char	= p->ioac.wchar & KB_MASK;
+	stats->read_syscalls	= p->ioac.syscr & KB_MASK;
+	stats->write_syscalls	= p->ioac.syscw & KB_MASK;
 #ifdef CONFIG_TASK_IO_ACCOUNTING
-	stats->read_bytes	= p->ioac.read_bytes;
-	stats->write_bytes	= p->ioac.write_bytes;
-	stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes;
+	stats->read_bytes	= p->ioac.read_bytes & KB_MASK;
+	stats->write_bytes	= p->ioac.write_bytes & KB_MASK;
+	stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes & KB_MASK;
 #else
 	stats->read_bytes	= 0;
 	stats->write_bytes	= 0;
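KB_MASK is ~(KB-1), which clears the low 10 bits, so every reported counter is truncated to a whole kilobyte; byte-exact values could otherwise leak fine-grained information about another task's I/O. Note the commit applies the same mask to the syscall counters, although those are event counts rather than byte sizes. A small sketch of the rounding:

#include <stdio.h>

#define KB 1024
#define KB_MASK (~(KB - 1)) /* clears the low 10 bits */

int main(void)
{
	unsigned long long rchar = 5 * KB + 37; /* 5157 bytes read */

	/*
	 * The reported value is rounded down to a 1 KB boundary, so an
	 * observer can no longer recover the exact byte count.
	 */
	printf("raw=%llu masked=%llu\n", rchar,
	       rchar & (unsigned long long)KB_MASK); /* 5157 -> 5120 */
	return 0;
}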

kernel/workqueue.c

@@ -2412,8 +2412,13 @@ reflush:
 	for_each_cwq_cpu(cpu, wq) {
 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+		bool drained;
 
-		if (!cwq->nr_active && list_empty(&cwq->delayed_works))
+		spin_lock_irq(&cwq->gcwq->lock);
+		drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
+		spin_unlock_irq(&cwq->gcwq->lock);
+
+		if (drained)
 			continue;
 
 		if (++flush_cnt == 10 ||
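cwq->nr_active and the delayed_works list are only stable under gcwq->lock, so sampling them unlocked could observe an in-between state and let drain_workqueue() declare the queue drained while work was still queued; the fix takes the lock just long enough to compute "drained". A pthread sketch of the same "read related fields in one critical section" pattern (hypothetical structure):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-queue state, updated elsewhere under ->lock. */
struct queue {
	pthread_mutex_t lock;
	int nr_active;
	int nr_delayed;
};

static bool queue_drained(struct queue *q)
{
	bool drained;

	/*
	 * As in the fix: read both fields inside one critical section
	 * so the check cannot interleave with a concurrent update.
	 */
	pthread_mutex_lock(&q->lock);
	drained = (q->nr_active == 0 && q->nr_delayed == 0);
	pthread_mutex_unlock(&q->lock);
	return drained;
}

int main(void)
{
	struct queue q = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };
	printf("drained=%d\n", queue_drained(&q));
	return 0;
}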