Merge commit 'v2.6.29-rc4' into sched/core
kernel/async.c

@@ -54,6 +54,7 @@ asynchronous and synchronous parts of the kernel.
 #include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/kthread.h>
+#include <linux/delay.h>
 #include <asm/atomic.h>
 
 static async_cookie_t next_cookie = 1;
@@ -132,21 +133,23 @@ static void run_one_entry(void)
        entry = list_first_entry(&async_pending, struct async_entry, list);
 
        /* 2) move it to the running queue */
-       list_del(&entry->list);
-       list_add_tail(&entry->list, &async_running);
+       list_move_tail(&entry->list, entry->running);
        spin_unlock_irqrestore(&async_lock, flags);
 
        /* 3) run it (and print duration)*/
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
-               printk("calling %lli_%pF @ %i\n", entry->cookie, entry->func, task_pid_nr(current));
+               printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
+                       entry->func, task_pid_nr(current));
                calltime = ktime_get();
        }
        entry->func(entry->data, entry->cookie);
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
-               printk("initcall %lli_%pF returned 0 after %lld usecs\n", entry->cookie,
-                       entry->func, ktime_to_ns(delta) >> 10);
+               printk("initcall %lli_%pF returned 0 after %lld usecs\n",
+                       (long long)entry->cookie,
+                       entry->func,
+                       (long long)ktime_to_ns(delta) >> 10);
        }
 
        /* 4) remove it from the running queue */
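For context, list_move_tail() from <linux/list.h> folds the delete and re-insert into a single call, and here it also retargets the per-entry domain list instead of the global one. A minimal sketch of the equivalence, with a hypothetical destination list:

#include <linux/list.h>

static LIST_HEAD(running);      /* hypothetical destination list */

static void move_two_step(struct list_head *entry)
{
        /* the open-coded form the old code used: */
        list_del(entry);
        list_add_tail(entry, &running);
}

static void move_one_call(struct list_head *entry)
{
        /* the equivalent single call the new code uses: */
        list_move_tail(entry, &running);
}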
@@ -205,18 +208,44 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l
        return newcookie;
 }
 
+/**
+ * async_schedule - schedule a function for asynchronous execution
+ * @ptr: function to execute asynchronously
+ * @data: data pointer to pass to the function
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
 async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
 {
-       return __async_schedule(ptr, data, &async_pending);
+       return __async_schedule(ptr, data, &async_running);
 }
 EXPORT_SYMBOL_GPL(async_schedule);
 
-async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *running)
+/**
+ * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
+ * @ptr: function to execute asynchronously
+ * @data: data pointer to pass to the function
+ * @running: running list for the domain
+ *
+ * Returns an async_cookie_t that may be used for checkpointing later.
+ * @running may be used in the async_synchronize_*_domain() functions
+ * to wait within a certain synchronization domain rather than globally.
+ * A synchronization domain is specified via the running queue @running to use.
+ * Note: This function may be called from atomic or non-atomic contexts.
+ */
+async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
+                                    struct list_head *running)
 {
        return __async_schedule(ptr, data, running);
 }
-EXPORT_SYMBOL_GPL(async_schedule_special);
+EXPORT_SYMBOL_GPL(async_schedule_domain);
 
+/**
+ * async_synchronize_full - synchronize all asynchronous function calls
+ *
+ * This function waits until all asynchronous function calls have been done.
+ */
 void async_synchronize_full(void)
 {
        do {
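async_schedule() is the common case: queue a slow call and let the async thread pool run it while the caller continues. A minimal caller sketch, with hypothetical names (my_probe, my_init):

#include <linux/async.h>
#include <linux/init.h>

/* hypothetical worker; the signature must match async_func_ptr */
static void my_probe(void *data, async_cookie_t cookie)
{
        /* slow, boot-time initialization runs off the caller's context */
}

static int __init my_init(void)
{
        async_schedule(my_probe, NULL); /* returns a cookie immediately */
        return 0;
}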
@@ -225,13 +254,30 @@ void async_synchronize_full(void)
 }
 EXPORT_SYMBOL_GPL(async_synchronize_full);
 
-void async_synchronize_full_special(struct list_head *list)
+/**
+ * async_synchronize_full_domain - synchronize all asynchronous functions within a certain domain
+ * @list: running list to synchronize on
+ *
+ * This function waits until all asynchronous function calls for the
+ * synchronization domain specified by the running list @list have been done.
+ */
+void async_synchronize_full_domain(struct list_head *list)
 {
-       async_synchronize_cookie_special(next_cookie, list);
+       async_synchronize_cookie_domain(next_cookie, list);
 }
-EXPORT_SYMBOL_GPL(async_synchronize_full_special);
+EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
 
-void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *running)
+/**
+ * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
+ * @cookie: async_cookie_t to use as checkpoint
+ * @running: running list to synchronize on
+ *
+ * This function waits until all asynchronous function calls for the
+ * synchronization domain specified by the running list @running submitted
+ * prior to @cookie have been done.
+ */
+void async_synchronize_cookie_domain(async_cookie_t cookie,
+                                    struct list_head *running)
 {
        ktime_t starttime, delta, endtime;
 
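A synchronization domain lets a subsystem flush only its own asynchronous work rather than every pending call in the system. A minimal usage sketch under the renamed API, with a hypothetical private running list:

#include <linux/async.h>
#include <linux/list.h>

static LIST_HEAD(my_domain);    /* hypothetical per-driver running list */

static void my_async_work(void *data, async_cookie_t cookie)
{
        /* ... */
}

static void my_flush(void)
{
        async_schedule_domain(my_async_work, NULL, &my_domain);
        /* waits only for work queued on &my_domain, not for global work */
        async_synchronize_full_domain(&my_domain);
}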
@@ -247,14 +293,22 @@ void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *r
                delta = ktime_sub(endtime, starttime);
 
                printk("async_continuing @ %i after %lli usec\n",
-                       task_pid_nr(current), ktime_to_ns(delta) >> 10);
+                       task_pid_nr(current),
+                       (long long)ktime_to_ns(delta) >> 10);
        }
 }
-EXPORT_SYMBOL_GPL(async_synchronize_cookie_special);
+EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
 
+/**
+ * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
+ * @cookie: async_cookie_t to use as checkpoint
+ *
+ * This function waits until all asynchronous function calls prior to @cookie
+ * have been done.
+ */
 void async_synchronize_cookie(async_cookie_t cookie)
 {
-       async_synchronize_cookie_special(cookie, &async_running);
+       async_synchronize_cookie_domain(cookie, &async_running);
 }
 EXPORT_SYMBOL_GPL(async_synchronize_cookie);
 
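Cookie checkpointing orders a caller against previously submitted work: synchronizing on a cookie waits for everything submitted before that cookie was handed out. A minimal sketch (my_probe is the hypothetical worker from the earlier sketch; the conventional cookie + 1 also waits for the scheduled call itself):

#include <linux/async.h>

static void ordered_setup(void)
{
        async_cookie_t cookie;

        cookie = async_schedule(my_probe, NULL);
        /* ... submit or do other work here ... */
        async_synchronize_cookie(cookie + 1);   /* my_probe and all earlier calls done */
}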
@@ -315,7 +369,11 @@ static int async_manager_thread(void *unused)
                ec = atomic_read(&entry_count);
 
                while (tc < ec && tc < MAX_THREADS) {
-                       kthread_run(async_thread, NULL, "async/%i", tc);
+                       if (IS_ERR(kthread_run(async_thread, NULL, "async/%i",
+                                              tc))) {
+                               msleep(100);
+                               continue;
+                       }
                        atomic_inc(&thread_count);
                        tc++;
                }
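kthread_run() never returns NULL; on failure it returns an ERR_PTR() value, which is why the hunk above tests IS_ERR() instead of the result's truth value. A minimal sketch of the pattern, with a hypothetical thread function and name:

#include <linux/kthread.h>
#include <linux/err.h>

static int my_thread_fn(void *data)
{
        return 0;       /* hypothetical; real kthreads usually loop here */
}

static int start_worker(int id)
{
        struct task_struct *tsk;

        tsk = kthread_run(my_thread_fn, NULL, "my-worker/%d", id);
        if (IS_ERR(tsk))
                return PTR_ERR(tsk);    /* e.g. -ENOMEM; caller may retry */
        return 0;
}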
@@ -330,7 +388,9 @@ static int async_manager_thread(void *unused)
 static int __init async_init(void)
 {
        if (async_enabled)
-               kthread_run(async_manager_thread, NULL, "async/mgr");
+               if (IS_ERR(kthread_run(async_manager_thread, NULL,
+                                      "async/mgr")))
+                       async_enabled = 0;
 
        return 0;
 }
kernel/fork.c

@@ -1005,6 +1005,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         * triggers too late. This doesn't hurt, the check is only there
         * to stop root fork bombs.
         */
+       retval = -EAGAIN;
        if (nr_threads >= max_threads)
                goto bad_fork_cleanup_count;
 
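The added assignment matters because copy_process() reports errors through retval at the goto target; jumping to the cleanup label with a stale retval would return the wrong result for a failed fork. A minimal sketch of the goto-cleanup pattern, with hypothetical names:

static int too_busy(void)
{
        return 0;       /* hypothetical predicate */
}

static int do_create(void)
{
        int retval;

        retval = -EAGAIN;       /* must be set before the jump */
        if (too_busy())
                goto out_cleanup;

        retval = 0;             /* success path */
out_cleanup:
        /* undo any partial work here; retval carries the result */
        return retval;
}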
kernel/irq/numa_migrate.c

@@ -71,7 +71,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
        desc = irq_desc_ptrs[irq];
 
        if (desc && old_desc != desc)
-                       goto out_unlock;
+               goto out_unlock;
 
        node = cpu_to_node(cpu);
        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
@@ -84,10 +84,15 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
        init_copy_one_irq_desc(irq, old_desc, desc, cpu);
 
        irq_desc_ptrs[irq] = desc;
+       spin_unlock_irqrestore(&sparse_irq_lock, flags);
 
        /* free the old one */
        free_one_irq_desc(old_desc, desc);
+       spin_unlock(&old_desc->lock);
        kfree(old_desc);
+       spin_lock(&desc->lock);
 
        return desc;
 
 out_unlock:
        spin_unlock_irqrestore(&sparse_irq_lock, flags);
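The reordering makes the function release the registry lock as soon as the new descriptor is published, drop the old descriptor's lock before freeing it, and return holding the new descriptor's lock. A minimal sketch of that hand-over shape, with hypothetical types and names:

#include <linux/spinlock.h>
#include <linux/slab.h>

struct obj {
        spinlock_t lock;
};

static DEFINE_SPINLOCK(registry_lock);
static struct obj *registry_slot;

/* enter with registry_lock (irqsave) and old->lock held;
 * return with only new->lock held */
static struct obj *publish_and_swap(struct obj *old, struct obj *new,
                                    unsigned long flags)
{
        registry_slot = new;                            /* publish */
        spin_unlock_irqrestore(&registry_lock, flags);  /* registry done */

        spin_unlock(&old->lock);                        /* never free a locked object */
        kfree(old);
        spin_lock(&new->lock);                          /* hand the caller the new lock */
        return new;
}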
kernel/power/main.c

@@ -57,16 +57,6 @@ int pm_notifier_call_chain(unsigned long val)
 #ifdef CONFIG_PM_DEBUG
 int pm_test_level = TEST_NONE;
 
-static int suspend_test(int level)
-{
-       if (pm_test_level == level) {
-               printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
-               mdelay(5000);
-               return 1;
-       }
-       return 0;
-}
-
 static const char * const pm_tests[__TEST_AFTER_LAST] = {
        [TEST_NONE] = "none",
        [TEST_CORE] = "core",
@@ -125,14 +115,24 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
 }
 
 power_attr(pm_test);
-#else /* !CONFIG_PM_DEBUG */
-static inline int suspend_test(int level) { return 0; }
-#endif /* !CONFIG_PM_DEBUG */
+#endif /* CONFIG_PM_DEBUG */
 
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_SUSPEND
 
+static int suspend_test(int level)
+{
+#ifdef CONFIG_PM_DEBUG
+       if (pm_test_level == level) {
+               printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
+               mdelay(5000);
+               return 1;
+       }
+#endif /* !CONFIG_PM_DEBUG */
+       return 0;
+}
+
 #ifdef CONFIG_PM_TEST_SUSPEND
 
 /*
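The net effect of the two hunks is a single definition of suspend_test() for all configurations, moved next to its callers under CONFIG_SUSPEND, with only the debug body compiled conditionally; this replaces the earlier real-function/inline-stub pair. A minimal sketch of that consolidated shape, with hypothetical names (my_checkpoint, CONFIG_MY_DEBUG):

#include <linux/kernel.h>

static int my_test_level;       /* hypothetical, set elsewhere */

static int my_checkpoint(int level)
{
#ifdef CONFIG_MY_DEBUG          /* hypothetical config option */
        if (my_test_level == level) {
                printk(KERN_INFO "debug: pausing at level %d\n", level);
                return 1;
        }
#endif
        return 0;               /* single fallback for all configurations */
}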
kernel/sched.c

@@ -4793,8 +4793,8 @@ EXPORT_SYMBOL(default_wake_function);
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-                            int nr_exclusive, int sync, void *key)
+void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+                     int nr_exclusive, int sync, void *key)
 {
        wait_queue_t *curr, *next;
kernel/sys.c

@@ -1525,22 +1525,14 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
                return -EINVAL;
        if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
                return -EFAULT;
+       if (new_rlim.rlim_cur > new_rlim.rlim_max)
+               return -EINVAL;
        old_rlim = current->signal->rlim + resource;
        if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
            !capable(CAP_SYS_RESOURCE))
                return -EPERM;
-
-       if (resource == RLIMIT_NOFILE) {
-               if (new_rlim.rlim_max == RLIM_INFINITY)
-                       new_rlim.rlim_max = sysctl_nr_open;
-               if (new_rlim.rlim_cur == RLIM_INFINITY)
-                       new_rlim.rlim_cur = sysctl_nr_open;
-               if (new_rlim.rlim_max > sysctl_nr_open)
-                       return -EPERM;
-       }
-
-       if (new_rlim.rlim_cur > new_rlim.rlim_max)
-               return -EINVAL;
+       if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
+               return -EPERM;
 
        retval = security_task_setrlimit(resource, &new_rlim);
        if (retval)
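From userspace the reordering is visible only in which error comes back: the cur > max check now precedes the capability check, and an infinite RLIMIT_NOFILE hard limit is rejected with EPERM instead of being silently capped to sysctl_nr_open. A small, runnable userspace probe:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl;

        if (getrlimit(RLIMIT_NOFILE, &rl) != 0) {
                perror("getrlimit");
                return 1;
        }
        rl.rlim_cur = rl.rlim_max;      /* raise soft limit to the hard limit */
        if (setrlimit(RLIMIT_NOFILE, &rl) != 0)
                perror("setrlimit");    /* EPERM or EINVAL per the checks above */
        return 0;
}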
kernel/trace/ftrace.c

@@ -1737,9 +1737,12 @@ static void clear_ftrace_pid(struct pid *pid)
 {
        struct task_struct *p;
 
+       rcu_read_lock();
        do_each_pid_task(pid, PIDTYPE_PID, p) {
                clear_tsk_trace_trace(p);
        } while_each_pid_task(pid, PIDTYPE_PID, p);
+       rcu_read_unlock();
+
        put_pid(pid);
 }
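do_each_pid_task() walks an RCU-protected structure, so both hunks wrap the iteration in an RCU read-side critical section. A minimal sketch of the required pattern, with a hypothetical callback:

#include <linux/rcupdate.h>
#include <linux/pid.h>
#include <linux/sched.h>

/* hypothetical: apply @fn to every task attached to @pid */
static void visit_pid_tasks(struct pid *pid, void (*fn)(struct task_struct *))
{
        struct task_struct *p;

        rcu_read_lock();        /* do_each_pid_task() requires this */
        do_each_pid_task(pid, PIDTYPE_PID, p) {
                fn(p);
        } while_each_pid_task(pid, PIDTYPE_PID, p);
        rcu_read_unlock();
}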
@@ -1747,9 +1750,11 @@ static void set_ftrace_pid(struct pid *pid)
 {
        struct task_struct *p;
 
+       rcu_read_lock();
        do_each_pid_task(pid, PIDTYPE_PID, p) {
                set_tsk_trace_trace(p);
        } while_each_pid_task(pid, PIDTYPE_PID, p);
+       rcu_read_unlock();
 }
 
 static void clear_ftrace_pid_task(struct pid **pid)
kernel/wait.c

@@ -91,6 +91,15 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 }
 EXPORT_SYMBOL(prepare_to_wait_exclusive);
 
+/**
+ * finish_wait - clean up after waiting in a queue
+ * @q: waitqueue waited on
+ * @wait: wait descriptor
+ *
+ * Sets current thread back to running state and removes
+ * the wait descriptor from the given waitqueue if still
+ * queued.
+ */
 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 {
        unsigned long flags;
@@ -117,6 +126,39 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 }
 EXPORT_SYMBOL(finish_wait);
 
+/**
+ * abort_exclusive_wait - abort exclusive waiting in a queue
+ * @q: waitqueue waited on
+ * @wait: wait descriptor
+ * @mode: runstate of the waiter to be woken
+ * @key: key to identify a wait bit queue or %NULL
+ *
+ * Sets current thread back to running state and removes
+ * the wait descriptor from the given waitqueue if still
+ * queued.
+ *
+ * Wakes up the next waiter if the caller is concurrently
+ * woken up through the queue.
+ *
+ * This prevents waiter starvation where an exclusive waiter
+ * aborts and is woken up concurrently and no one wakes up
+ * the next waiter.
+ */
+void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
+                         unsigned int mode, void *key)
+{
+       unsigned long flags;
+
+       __set_current_state(TASK_RUNNING);
+       spin_lock_irqsave(&q->lock, flags);
+       if (!list_empty(&wait->task_list))
+               list_del_init(&wait->task_list);
+       else if (waitqueue_active(q))
+               __wake_up_common(q, mode, 1, 0, key);
+       spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(abort_exclusive_wait);
+
 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
 {
        int ret = default_wake_function(wait, mode, sync, key);
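This is the starvation case abort_exclusive_wait() closes: an exclusive wakeup wakes only one waiter, so a waiter that bails out (say, on a signal) after being chosen must pass the wakeup on or the event is lost. A minimal sketch of an exclusive wait loop using it, with a hypothetical queue and condition:

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);  /* hypothetical queue */
static int my_event;                    /* hypothetical condition */

static int wait_for_my_event(void)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait_exclusive(&my_wq, &wait, TASK_INTERRUPTIBLE);
                if (my_event)
                        break;
                if (signal_pending(current)) {
                        /* hand a concurrent wakeup to the next waiter */
                        abort_exclusive_wait(&my_wq, &wait,
                                             TASK_INTERRUPTIBLE, NULL);
                        return -ERESTARTSYS;
                }
                schedule();
        }
        finish_wait(&my_wq, &wait);
        return 0;
}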
@@ -177,17 +219,20 @@ int __sched
 __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
                        int (*action)(void *), unsigned mode)
 {
-       int ret = 0;
-
        do {
+               int ret;
+
                prepare_to_wait_exclusive(wq, &q->wait, mode);
-               if (test_bit(q->key.bit_nr, q->key.flags)) {
-                       if ((ret = (*action)(q->key.flags)))
-                               break;
-               }
+               if (!test_bit(q->key.bit_nr, q->key.flags))
+                       continue;
+               ret = action(q->key.flags);
+               if (!ret)
+                       continue;
+               abort_exclusive_wait(wq, &q->wait, mode, &q->key);
+               return ret;
        } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
        finish_wait(wq, &q->wait);
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL(__wait_on_bit_lock);
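After the rewrite, __wait_on_bit_lock() returns 0 only once the bit has actually been acquired, and on an action failure it propagates the error after handing any pending wakeup to the next waiter via abort_exclusive_wait(). A minimal sketch of a caller through the wait_on_bit_lock() wrapper, with a hypothetical sleeping action:

#include <linux/wait.h>
#include <linux/sched.h>

/* hypothetical action: sleep until the bit holder releases it */
static int my_bit_wait(void *word)
{
        schedule();
        return signal_pending(current) ? -EINTR : 0;
}

static int lock_my_bit(unsigned long *word, int bit)
{
        /* returns 0 with the bit owned, or my_bit_wait()'s error code */
        return wait_on_bit_lock(word, bit, my_bit_wait, TASK_INTERRUPTIBLE);
}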