Merge branch 'sched-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (38 commits)
  sched debug: add name to sched_domain sysctl entries
  sched: sync wakeups vs avg_overlap
  sched: remove redundant code in cpu_cgroup_create()
  sched_rt.c: resch needed in rt_rq_enqueue() for the root rt_rq
  cpusets: scan_for_empty_cpusets(), cpuset doesn't seem to be so const
  sched: minor optimizations in wake_affine and select_task_rq_fair
  sched: maintain only task entities in cfs_rq->tasks list
  sched: fixup buddy selection
  sched: more sanity checks on the bandwidth settings
  sched: add some comments to the bandwidth code
  sched: fixlet for group load balance
  sched: rework wakeup preemption
  CFS scheduler: documentation about scheduling policies
  sched: clarify ifdef tangle
  sched: fix list traversal to use _rcu variant
  sched: turn off WAKEUP_OVERLAP
  sched: wakeup preempt when small overlap
  kernel/cpu.c: create a CPU_STARTING cpu_chain notifier
  kernel/cpu.c: Move the CPU_DYING notifiers
  sched: fix __load_balance_iterator() for cfq with only one task
  ...
include/linux/completion.h
@@ -10,6 +10,18 @@
 #include <linux/wait.h>
 
+/**
+ * struct completion - structure used to maintain state for a "completion"
+ *
+ * This is the opaque structure used to maintain the state for a "completion".
+ * Completions currently use a FIFO to queue threads that have to wait for
+ * the "completion" event.
+ *
+ * See also: complete(), wait_for_completion() (and friends _timeout,
+ * _interruptible, _interruptible_timeout, and _killable), init_completion(),
+ * and macros DECLARE_COMPLETION(), DECLARE_COMPLETION_ONSTACK(), and
+ * INIT_COMPLETION().
+ */
 struct completion {
         unsigned int done;
         wait_queue_head_t wait;
@@ -21,6 +33,14 @@ struct completion {
 #define COMPLETION_INITIALIZER_ONSTACK(work) \
         ({ init_completion(&work); work; })
 
+/**
+ * DECLARE_COMPLETION: - declare and initialize a completion structure
+ * @work: identifier for the completion structure
+ *
+ * This macro declares and initializes a completion structure. Generally used
+ * for static declarations. You should use the _ONSTACK variant for automatic
+ * variables.
+ */
 #define DECLARE_COMPLETION(work) \
         struct completion work = COMPLETION_INITIALIZER(work)
 
@@ -29,6 +49,13 @@ struct completion {
  * completions - so we use the _ONSTACK() variant for those that
  * are on the kernel stack:
  */
+/**
+ * DECLARE_COMPLETION_ONSTACK: - declare and initialize a completion structure
+ * @work: identifier for the completion structure
+ *
+ * This macro declares and initializes a completion structure on the kernel
+ * stack.
+ */
 #ifdef CONFIG_LOCKDEP
 # define DECLARE_COMPLETION_ONSTACK(work) \
         struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
@@ -36,6 +63,13 @@ struct completion {
 # define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
 #endif
 
+/**
+ * init_completion: - Initialize a dynamically allocated completion
+ * @x: completion structure that is to be initialized
+ *
+ * This inline function will initialize a dynamically created completion
+ * structure.
+ */
 static inline void init_completion(struct completion *x)
 {
         x->done = 0;
@@ -55,6 +89,13 @@ extern bool completion_done(struct completion *x);
 extern void complete(struct completion *);
 extern void complete_all(struct completion *);
 
+/**
+ * INIT_COMPLETION: - reinitialize a completion structure
+ * @x: completion structure to be reinitialized
+ *
+ * This macro should be used to reinitialize a completion structure so it can
+ * be reused. This is especially important after complete_all() is used.
+ */
 #define INIT_COMPLETION(x)      ((x).done = 0)
 
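The kernel-doc added above documents the completion API end to end. A minimal sketch of how the pieces fit together (the demo_init/worker_fn module names are hypothetical, not part of this diff):

#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>

static DECLARE_COMPLETION(setup_done);  /* static declaration, per the doc above */

static int worker_fn(void *unused)
{
        /* ... perform setup work ... */
        complete(&setup_done);          /* wake one FIFO-queued waiter */
        return 0;
}

static int __init demo_init(void)
{
        kthread_run(worker_fn, NULL, "demo-worker");
        wait_for_completion(&setup_done);       /* block until complete() runs */

        /* To reuse the structure later (especially after a complete_all()),
         * reset it first, as the INIT_COMPLETION() doc above advises: */
        INIT_COMPLETION(setup_done);
        return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");

Note the division of labor the new comments spell out: DECLARE_COMPLETION() for static lifetime, DECLARE_COMPLETION_ONSTACK() for automatic variables, and init_completion() when the structure is allocated dynamically.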
include/linux/cpu.h
@@ -69,6 +69,7 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
 #endif
 
 int cpu_up(unsigned int cpu);
+void notify_cpu_starting(unsigned int cpu);
 extern void cpu_hotplug_init(void);
 extern void cpu_maps_update_begin(void);
 extern void cpu_maps_update_done(void);
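notify_cpu_starting() is intended to be called from each architecture's secondary-CPU bringup path. A hedged sketch of such a call site; start_secondary() stands in here for the arch-specific entry point, and the exact sequence varies per architecture:

/* runs on the freshly booted CPU, interrupts still disabled */
void start_secondary(void)
{
        unsigned int cpu = smp_processor_id();

        /* ... low-level per-cpu initialization ... */
        notify_cpu_starting(cpu);       /* fire the CPU_STARTING notifier chain */
        local_irq_enable();             /* only after the notifiers have run */

        cpu_idle();                     /* enter the idle loop */
}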
include/linux/notifier.h
@@ -213,9 +213,16 @@ static inline int notifier_to_errno(int ret)
 #define CPU_DOWN_FAILED         0x0006 /* CPU (unsigned)v NOT going down */
 #define CPU_DEAD                0x0007 /* CPU (unsigned)v dead */
 #define CPU_DYING               0x0008 /* CPU (unsigned)v not running any task,
-                                        * not handling interrupts, soon dead */
+                                        * not handling interrupts, soon dead.
+                                        * Called on the dying cpu, interrupts
+                                        * are already disabled. Must not
+                                        * sleep, must not fail */
 #define CPU_POST_DEAD           0x0009 /* CPU (unsigned)v dead, cpu_hotplug
                                         * lock is dropped */
+#define CPU_STARTING            0x000A /* CPU (unsigned)v soon running.
+                                        * Called on the new cpu, just before
+                                        * enabling interrupts. Must not sleep,
+                                        * must not fail */
 
 /* Used for CPU hotplug events occuring while tasks are frozen due to a suspend
  * operation in progress
@@ -229,6 +236,7 @@ static inline int notifier_to_errno(int ret)
 #define CPU_DOWN_FAILED_FROZEN  (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
 #define CPU_DEAD_FROZEN         (CPU_DEAD | CPU_TASKS_FROZEN)
 #define CPU_DYING_FROZEN        (CPU_DYING | CPU_TASKS_FROZEN)
+#define CPU_STARTING_FROZEN     (CPU_STARTING | CPU_TASKS_FROZEN)
 
 /* Hibernation and suspend events */
 #define PM_HIBERNATION_PREPARE  0x0001 /* Going to hibernate */
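A subscriber sees CPU_STARTING and CPU_DYING (and their _FROZEN variants during suspend/resume) through an ordinary CPU notifier callback. A minimal sketch with hypothetical demo_* names; masking off CPU_TASKS_FROZEN folds the frozen cases into the plain ones:

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

static int demo_cpu_callback(struct notifier_block *nb,
                             unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        pr_debug("cpu %u: hotplug event %lu\n", cpu, action);

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_STARTING:
                /* runs on @cpu just before it enables interrupts:
                 * must not sleep, must not fail */
                break;
        case CPU_DYING:
                /* runs on the dying @cpu with interrupts disabled:
                 * must not sleep, must not fail */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block demo_cpu_notifier = {
        .notifier_call = demo_cpu_callback,
};

The block would be registered with register_cpu_notifier(&demo_cpu_notifier). Because CPU_STARTING callbacks run on the incoming CPU itself, per-cpu state can be set up without cross-CPU synchronization.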
include/linux/proportions.h
@@ -104,8 +104,8 @@ struct prop_local_single {
          * snapshot of the last seen global state
          * and a lock protecting this state
          */
-        int shift;
         unsigned long period;
+        int shift;
         spinlock_t lock;                /* protect the snapshot state */
 };
 
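The hunk above only reorders fields. A plausible reading (an assumption, not stated in the diff) is struct packing: on a 64-bit build the two 32-bit members now share one word instead of straddling the 8-byte period:

/* assumed x86_64 layout, CONFIG_DEBUG_SPINLOCK=n (spinlock_t = 4 bytes):
 *
 *   before:  int shift;             4 bytes + 4-byte hole (period needs
 *            unsigned long period;  8 bytes   8-byte alignment)
 *            spinlock_t lock;       4 bytes + 4-byte tail padding  -> 24 bytes
 *
 *   after:   unsigned long period;  8 bytes
 *            int shift;             4 bytes
 *            spinlock_t lock;       4 bytes (packs with shift)     -> 16 bytes
 */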
include/linux/sched.h
@@ -451,8 +451,8 @@ struct signal_struct {
          * - everyone except group_exit_task is stopped during signal delivery
          *   of fatal signals, group_exit_task processes the signal.
          */
-        struct task_struct      *group_exit_task;
         int                     notify_count;
+        struct task_struct      *group_exit_task;
 
         /* thread group stop support, overloads group_exit_code too */
         int                     group_stop_count;
@@ -824,6 +824,9 @@ struct sched_domain {
         unsigned int ttwu_move_affine;
         unsigned int ttwu_move_balance;
 #endif
+#ifdef CONFIG_SCHED_DEBUG
+        char *name;
+#endif
 };
 
 extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
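The new name field gives each sched_domain a human-readable label for its sysctl entry under CONFIG_SCHED_DEBUG (per "sched debug: add name to sched_domain sysctl entries" in the shortlog). A sketch of how a domain initializer could stamp the level name; the SD_INIT_NAME helper shown here is modeled on the commit summary, not part of this hunk:

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(sd, type)  ((sd)->name = #type)
#else
# define SD_INIT_NAME(sd, type)  do { } while (0)
#endif

/* e.g. while building the domain hierarchy: */
static void demo_name_domain(struct sched_domain *sd)
{
        SD_INIT_NAME(sd, MC);   /* sysctl entry now reads "MC", not a bare index */
}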
@@ -897,7 +900,7 @@ struct sched_class {
         void (*yield_task) (struct rq *rq);
         int  (*select_task_rq)(struct task_struct *p, int sync);
 
-        void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+        void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
 
         struct task_struct * (*pick_next_task) (struct rq *rq);
         void (*put_prev_task) (struct rq *rq, struct task_struct *p);
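With the extra parameter, the wakeup path can forward its synchronous-wakeup hint straight into the preemption check, matching select_task_rq() above. A minimal sketch of the calling convention implied by this hunk (the wrapper body is illustrative):

/* core wrapper: the hook is invoked on the running task's class */
static inline void check_preempt_curr(struct rq *rq, struct task_struct *p,
                                      int sync)
{
        rq->curr->sched_class->check_preempt_curr(rq, p, sync);
}

A sync wakeup hints that the waker is about to sleep, which the fair class can use to preempt immediately when the run-time overlap is small (see "sched: wakeup preempt when small overlap" in the shortlog).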
@@ -1010,8 +1013,8 @@ struct sched_entity {
 
 struct sched_rt_entity {
         struct list_head run_list;
-        unsigned int time_slice;
         unsigned long timeout;
+        unsigned int time_slice;
         int nr_cpus_allowed;
 
         struct sched_rt_entity *back;