diff -urpNa -X dontdiff linux-2.6.34-rc1/arch/powerpc/platforms/pseries/offline_states.h linux-2.6.34-rc1-tip.007b0924-rcu/arch/powerpc/platforms/pseries/offline_states.h
--- linux-2.6.34-rc1/arch/powerpc/platforms/pseries/offline_states.h	2010-02-24 10:52:17.000000000 -0800
+++ linux-2.6.34-rc1-tip.007b0924-rcu/arch/powerpc/platforms/pseries/offline_states.h	2010-03-15 16:32:29.000000000 -0700
@@ -9,10 +9,40 @@ enum cpu_state_vals {
 	CPU_MAX_OFFLINE_STATES
 };
 
+#ifdef CONFIG_HOTPLUG_CPU
+
 extern enum cpu_state_vals get_cpu_current_state(int cpu);
 extern void set_cpu_current_state(int cpu, enum cpu_state_vals state);
 extern enum cpu_state_vals get_preferred_offline_state(int cpu);
 extern void set_preferred_offline_state(int cpu, enum cpu_state_vals state);
 extern void set_default_offline_state(int cpu);
+
+#else /* #ifdef CONFIG_HOTPLUG_CPU */
+
+static inline enum cpu_state_vals get_cpu_current_state(int cpu)
+{
+	return cpu_online(cpu) ? CPU_STATE_ONLINE : CPU_STATE_OFFLINE;
+}
+
+static inline void set_cpu_current_state(int cpu, enum cpu_state_vals state)
+{
+}
+
+static inline enum cpu_state_vals get_preferred_offline_state(int cpu)
+{
+	return CPU_STATE_OFFLINE;
+}
+
+static inline void set_preferred_offline_state(int cpu,
+					       enum cpu_state_vals state)
+{
+}
+
+static inline void set_default_offline_state(int cpu)
+{
+}
+
+#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
+
 extern int start_secondary(void);
 #endif
diff -urpNa -X dontdiff linux-2.6.34-rc1/include/linux/rcupdate.h linux-2.6.34-rc1-tip.007b0924-rcu/include/linux/rcupdate.h
--- linux-2.6.34-rc1/include/linux/rcupdate.h	2010-03-15 12:46:13.000000000 -0700
+++ linux-2.6.34-rc1-tip.007b0924-rcu/include/linux/rcupdate.h	2010-03-27 22:47:09.000000000 -0700
@@ -97,6 +97,11 @@ extern struct lockdep_map rcu_sched_lock
 # define rcu_read_release_sched() \
 		lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
 
+static inline int debug_lockdep_rcu_enabled(void)
+{
+	return likely(rcu_scheduler_active && debug_locks);
+}
+
 /**
  * rcu_read_lock_held - might we be in RCU read-side critical section?
  *
@@ -104,28 +109,21 @@ extern struct lockdep_map rcu_sched_lock
  * an RCU read-side critical section. In absence of CONFIG_PROVE_LOCKING,
  * this assumes we are in an RCU read-side critical section unless it can
  * prove otherwise.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
 static inline int rcu_read_lock_held(void)
 {
-	if (debug_locks)
-		return lock_is_held(&rcu_lock_map);
-	return 1;
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
+	return lock_is_held(&rcu_lock_map);
 }
 
-/**
- * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
- *
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
- * an RCU-bh read-side critical section. In absence of CONFIG_PROVE_LOCKING,
- * this assumes we are in an RCU-bh read-side critical section unless it can
- * prove otherwise.
+/*
+ * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
+ * hell.
  */
-static inline int rcu_read_lock_bh_held(void)
-{
-	if (debug_locks)
-		return lock_is_held(&rcu_bh_lock_map);
-	return 1;
-}
+extern int rcu_read_lock_bh_held(void);
 
 /**
  * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
@@ -135,15 +133,26 @@ static inline int rcu_read_lock_bh_held(
  * this assumes we are in an RCU-sched read-side critical section unless it
  * can prove otherwise. Note that disabling of preemption (including
  * disabling irqs) counts as an RCU-sched read-side critical section.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
+#ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
 	int lockdep_opinion = 0;
 
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
 	if (debug_locks)
 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
-	return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active;
+	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
+}
+#else /* #ifdef CONFIG_PREEMPT */
+static inline int rcu_read_lock_sched_held(void)
+{
+	return 1;
 }
+#endif /* #else #ifdef CONFIG_PREEMPT */
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
@@ -164,10 +173,17 @@ static inline int rcu_read_lock_bh_held(
 	return 1;
 }
 
+#ifdef CONFIG_PREEMPT
+static inline int rcu_read_lock_sched_held(void)
+{
+	return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled();
+}
+#else /* #ifdef CONFIG_PREEMPT */
 static inline int rcu_read_lock_sched_held(void)
 {
-	return preempt_count() != 0 || !rcu_scheduler_active;
+	return 1;
 }
+#endif /* #else #ifdef CONFIG_PREEMPT */
 
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
@@ -184,7 +200,7 @@ static inline int rcu_read_lock_sched_he
  */
 #define rcu_dereference_check(p, c) \
 	({ \
-		if (debug_locks && !(c)) \
+		if (debug_lockdep_rcu_enabled() && !(c)) \
 			lockdep_rcu_dereference(__FILE__, __LINE__); \
 		rcu_dereference_raw(p); \
 	})
diff -urpNa -X dontdiff linux-2.6.34-rc1/include/linux/tracepoint.h linux-2.6.34-rc1-tip.007b0924-rcu/include/linux/tracepoint.h
--- linux-2.6.34-rc1/include/linux/tracepoint.h	2010-02-24 10:52:17.000000000 -0800
+++ linux-2.6.34-rc1-tip.007b0924-rcu/include/linux/tracepoint.h	2010-03-27 22:47:09.000000000 -0700
@@ -49,7 +49,7 @@ struct tracepoint {
 		void **it_func;						\
 									\
 		rcu_read_lock_sched_notrace();				\
-		it_func = rcu_dereference((tp)->funcs);			\
+		it_func = rcu_dereference_sched((tp)->funcs);		\
 		if (it_func) {						\
 			do {						\
 				((void(*)(proto))(*it_func))(args);	\
diff -urpNa -X dontdiff linux-2.6.34-rc1/kernel/rcupdate.c linux-2.6.34-rc1-tip.007b0924-rcu/kernel/rcupdate.c
--- linux-2.6.34-rc1/kernel/rcupdate.c	2010-03-15 12:46:14.000000000 -0700
+++ linux-2.6.34-rc1-tip.007b0924-rcu/kernel/rcupdate.c	2010-03-27 22:47:09.000000000 -0700
@@ -45,6 +45,7 @@
 #include <linux/mutex.h>
 #include <linux/module.h>
 #include <linux/kernel_stat.h>
+#include <linux/hardirq.h>
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key rcu_lock_key;
@@ -66,6 +67,28 @@ EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+/**
+ * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
+ *
+ * Check for bottom half being disabled, which covers both the
+ * CONFIG_PROVE_RCU and not cases. Note that if someone uses
+ * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
+ * will show the situation.
+ *
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
+ */
+int rcu_read_lock_bh_held(void)
+{
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
+	return in_softirq();
+}
+EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
+
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 /*
  * This function is invoked towards the end of the scheduler's initialization
  * process. Before this is called, the idle task might contain
diff -urpNa -X dontdiff linux-2.6.34-rc1/kernel/rcutree.c linux-2.6.34-rc1-tip.007b0924-rcu/kernel/rcutree.c
--- linux-2.6.34-rc1/kernel/rcutree.c	2010-03-15 12:46:14.000000000 -0700
+++ linux-2.6.34-rc1-tip.007b0924-rcu/kernel/rcutree.c	2010-03-27 22:48:37.000000000 -0700
@@ -1236,11 +1236,11 @@ static void force_quiescent_state(struct
 		break; /* grace period idle or initializing, ignore. */
 
 	case RCU_SAVE_DYNTICK:
-
-		raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
 		if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
 			break; /* So gcc recognizes the dead code. */
 
+		raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
+
 		/* Record dyntick-idle state. */
 		force_qs_rnp(rsp, dyntick_save_progress_counter);
 		raw_spin_lock(&rnp->lock);  /* irqs already disabled */
@@ -1499,6 +1499,16 @@ static int __rcu_pending(struct rcu_stat
 
 	/* Is the RCU core waiting for a quiescent state from this CPU? */
 	if (rdp->qs_pending) {
+
+		/*
+		 * If force_quiescent_state() coming soon and this CPU
+		 * needs a quiescent state, and this is either RCU-sched
+		 * or RCU-bh, force a local reschedule.
+		 */
+		if (!rdp->preemptable &&
+		    ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
+				 jiffies))
+			set_need_resched();
 		rdp->n_rp_qs_pending++;
 		return 1;
 	}
@@ -1849,6 +1859,14 @@ static void __init rcu_init_one(struct r
 			INIT_LIST_HEAD(&rnp->blocked_tasks[3]);
 		}
 	}
+
+	rnp = rsp->level[NUM_RCU_LVLS - 1];
+	for_each_possible_cpu(i) {
+		if (i > rnp->grphi)
+			rnp++;
+		rsp->rda[i]->mynode = rnp;
+		rcu_boot_init_percpu_data(i, rsp);
+	}
 }
 
 /*
@@ -1859,19 +1877,11 @@ static void __init rcu_init_one(struct r
 #define RCU_INIT_FLAVOR(rsp, rcu_data) \
 do { \
 	int i; \
-	int j; \
-	struct rcu_node *rnp; \
 \
-	rcu_init_one(rsp); \
-	rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
-	j = 0; \
 	for_each_possible_cpu(i) { \
-		if (i > rnp[j].grphi) \
-			j++; \
-		per_cpu(rcu_data, i).mynode = &rnp[j]; \
 		(rsp)->rda[i] = &per_cpu(rcu_data, i); \
-		rcu_boot_init_percpu_data(i, rsp); \
 	} \
+	rcu_init_one(rsp); \
 } while (0)
 
 void __init rcu_init(void)
diff -urpNa -X dontdiff linux-2.6.34-rc1/kernel/rcutree.h linux-2.6.34-rc1-tip.007b0924-rcu/kernel/rcutree.h
--- linux-2.6.34-rc1/kernel/rcutree.h	2010-03-15 12:46:14.000000000 -0700
+++ linux-2.6.34-rc1-tip.007b0924-rcu/kernel/rcutree.h	2010-03-15 13:26:00.000000000 -0700
@@ -246,12 +246,21 @@ struct rcu_data {
 
 #define RCU_JIFFIES_TILL_FORCE_QS	 3	/* for rsp->jiffies_force_qs */
 
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK   (10 * HZ)  /* for rsp->jiffies_stall */
-#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ)  /* for rsp->jiffies_stall */
-#define RCU_STALL_RAT_DELAY		2	/* Allow other CPUs time */
-						/*  to take at least one */
-						/*  scheduling clock irq */
-						/*  before ratting on them. */
+
+#ifdef CONFIG_PROVE_RCU
+#define RCU_STALL_DELAY_DELTA	       (5 * HZ)
+#else
+#define RCU_STALL_DELAY_DELTA	       0
+#endif
+
+#define RCU_SECONDS_TILL_STALL_CHECK   (10 * HZ + RCU_STALL_DELAY_DELTA)
+						/* for rsp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ + RCU_STALL_DELAY_DELTA)
+						/* for rsp->jiffies_stall */
+#define RCU_STALL_RAT_DELAY		2	/* Allow other CPUs time */
+						/*  to take at least one */
+						/*  scheduling clock irq */
+						/*  before ratting on them. */
 
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
diff -urpNa -X dontdiff linux-2.6.34-rc1/kernel/rcutree_plugin.h linux-2.6.34-rc1-tip.007b0924-rcu/kernel/rcutree_plugin.h
--- linux-2.6.34-rc1/kernel/rcutree_plugin.h	2010-03-15 12:46:14.000000000 -0700
+++ linux-2.6.34-rc1-tip.007b0924-rcu/kernel/rcutree_plugin.h	2010-03-15 13:25:49.000000000 -0700
@@ -1010,6 +1010,10 @@ int rcu_needs_cpu(int cpu)
 	int c = 0;
 	int thatcpu;
 
+	/* Check for being in the holdoff period. */
+	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
+		return rcu_needs_cpu_quick_check(cpu);
+
 	/* Don't bother unless we are the last non-dyntick-idle CPU. */
 	for_each_cpu_not(thatcpu, nohz_cpu_mask)
 		if (thatcpu != cpu) {
@@ -1041,10 +1045,8 @@ int rcu_needs_cpu(int cpu)
 	}
 
 	/* If RCU callbacks are still pending, RCU still needs this CPU. */
-	if (c) {
+	if (c)
 		raise_softirq(RCU_SOFTIRQ);
-		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-	}
 	return c;
 }
diff -urpNa -X dontdiff linux-2.6.34-rc1/mm/mempolicy.c linux-2.6.34-rc1-tip.007b0924-rcu/mm/mempolicy.c
--- linux-2.6.34-rc1/mm/mempolicy.c	2010-03-15 12:46:14.000000000 -0700
+++ linux-2.6.34-rc1-tip.007b0924-rcu/mm/mempolicy.c	2010-03-18 12:10:45.000000000 -0700
@@ -1756,10 +1756,12 @@ struct mempolicy *__mpol_dup(struct memp
 
 	if (!new)
 		return ERR_PTR(-ENOMEM);
+	rcu_read_lock();
 	if (current_cpuset_is_being_rebound()) {
 		nodemask_t mems = cpuset_mems_allowed(current);
 		mpol_rebind_policy(old, &mems);
 	}
+	rcu_read_unlock();
 	*new = *old;
 	atomic_set(&new->refcnt, 1);
 	return new;
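
For illustration only; this usage note is not part of the patch. With
rcu_dereference_check() now gated by debug_lockdep_rcu_enabled(), a
caller's lockdep expression is evaluated only once the scheduler is up
and lockdep is active, which avoids false positives during early boot.
A caller passes the conditions under which the access is legal. The
names struct foo, gp, and gp_lock below are hypothetical, invented for
the example rather than taken from the kernel:

	struct foo {
		struct foo *next;
	};
	extern struct foo *gp;		/* RCU-protected pointer. */
	extern spinlock_t gp_lock;	/* Guards updates to gp. */

	/*
	 * Legal while within rcu_read_lock() or while holding gp_lock;
	 * any other caller (after boot, with lockdep enabled) causes
	 * lockdep_rcu_dereference() to report the offending file and
	 * line.
	 */
	struct foo *p = rcu_dereference_check(gp,
			rcu_read_lock_held() ||
			lockdep_is_held(&gp_lock));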