diff -urpNa -X dontdiff linux-2.6.34-rc4/arch/powerpc/mm/pgtable.c linux-2.6.34-rc4-bc293d62-rcu/arch/powerpc/mm/pgtable.c
--- linux-2.6.34-rc4/arch/powerpc/mm/pgtable.c	2010-04-19 14:45:07.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/arch/powerpc/mm/pgtable.c	2010-04-19 16:27:46.000000000 -0700
@@ -92,7 +92,6 @@ static void pte_free_rcu_callback(struct
 
 static void pte_free_submit(struct pte_freelist_batch *batch)
 {
-	INIT_RCU_HEAD(&batch->rcu);
 	call_rcu(&batch->rcu, pte_free_rcu_callback);
 }
 
diff -urpNa -X dontdiff linux-2.6.34-rc4/arch/sparc/Kconfig linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/Kconfig
--- linux-2.6.34-rc4/arch/sparc/Kconfig	2010-04-19 14:45:09.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/Kconfig	2010-04-19 15:01:10.000000000 -0700
@@ -37,6 +37,9 @@ config SPARC64
 	def_bool 64BIT
 	select ARCH_SUPPORTS_MSI
 	select HAVE_FUNCTION_TRACER
+	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_FUNCTION_GRAPH_FP_TEST
+	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_KRETPROBES
 	select HAVE_KPROBES
 	select HAVE_LMB
diff -urpNa -X dontdiff linux-2.6.34-rc4/arch/sparc/Kconfig.debug linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/Kconfig.debug
--- linux-2.6.34-rc4/arch/sparc/Kconfig.debug	2010-02-24 10:52:17.000000000 -0800
+++ linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/Kconfig.debug	2010-04-19 15:01:10.000000000 -0700
@@ -19,13 +19,10 @@ config DEBUG_DCFLUSH
 	bool "D-cache flush debugging"
 	depends on SPARC64 && DEBUG_KERNEL
 
-config STACK_DEBUG
-	bool "Stack Overflow Detection Support"
-
 config MCOUNT
 	bool
 	depends on SPARC64
-	depends on STACK_DEBUG || FUNCTION_TRACER
+	depends on FUNCTION_TRACER
 	default y
 
 config FRAME_POINTER
diff -urpNa -X dontdiff linux-2.6.34-rc4/arch/sparc/kernel/ftrace.c linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/ftrace.c
--- linux-2.6.34-rc4/arch/sparc/kernel/ftrace.c	2010-04-19 14:45:09.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/ftrace.c	2010-04-19 15:01:10.000000000 -0700
@@ -13,7 +13,7 @@ static const u32 ftrace_nop = 0x01000000
 
 static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
-	static u32 call;
+	u32 call;
 	s32 off;
 
 	off = ((s32)addr - (s32)ip);
@@ -91,3 +91,61 @@ int __init ftrace_dyn_arch_init(void *da
 	return 0;
 }
 #endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	u32 old, new;
+
+	old = *(u32 *) &ftrace_graph_call;
+	new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller);
+	return ftrace_modify_code(ip, old, new);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	u32 old, new;
+
+	old = *(u32 *) &ftrace_graph_call;
+	new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub);
+
+	return ftrace_modify_code(ip, old, new);
+}
+
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+unsigned long prepare_ftrace_return(unsigned long parent,
+				    unsigned long self_addr,
+				    unsigned long frame_pointer)
+{
+	unsigned long return_hooker = (unsigned long) &return_to_handler;
+	struct ftrace_graph_ent trace;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return parent + 8UL;
+
+	if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
+				     frame_pointer) == -EBUSY)
+		return parent + 8UL;
+
+	trace.func = self_addr;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		return parent + 8UL;
+	}
+
+	return return_hooker;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
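For reference, the shadow-return-stack bookkeeping that prepare_ftrace_return()
relies on can be modeled in a few lines of plain C.  The sketch below is a
user-space illustration only; shadow_push(), shadow_pop() and MAX_DEPTH are
made-up names standing in for ftrace_push_return_trace(), the pop performed
on behalf of return_to_handler, and the kernel's real depth limit.

	#include <stdio.h>

	#define MAX_DEPTH 50	/* stand-in for the kernel's real depth limit */

	static unsigned long ret_stack[MAX_DEPTH];
	static int curr_ret_stack = -1;

	/* Model of ftrace_push_return_trace(): save the caller's address. */
	static int shadow_push(unsigned long parent)
	{
		if (curr_ret_stack >= MAX_DEPTH - 1)
			return -1;	/* kernel returns -EBUSY, hook is skipped */
		ret_stack[++curr_ret_stack] = parent;
		return 0;
	}

	/* Model of the pop that return_to_handler performs on function exit. */
	static unsigned long shadow_pop(void)
	{
		return ret_stack[curr_ret_stack--];
	}

	int main(void)
	{
		if (shadow_push(0x1234UL) == 0)	/* "function entry" */
			printf("will return to %#lx\n", shadow_pop());
		return 0;
	}

If the push fails, the real code leaves the original return address in place
(parent + 8UL on sparc64), so tracing degrades gracefully instead of corrupting
the call chain.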
diff -urpNa -X dontdiff linux-2.6.34-rc4/arch/sparc/kernel/irq_64.c linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/irq_64.c
--- linux-2.6.34-rc4/arch/sparc/kernel/irq_64.c	2010-02-24 10:52:17.000000000 -0800
+++ linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/irq_64.c	2010-04-19 15:01:10.000000000 -0700
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include <linux/ftrace.h>
 #include
 #include
 
@@ -647,6 +648,14 @@ unsigned int sun4v_build_virq(u32 devhan
 	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
 	if (unlikely(!bucket))
 		return 0;
+
+	/* The only reference we store to the IRQ bucket is
+	 * by physical address which kmemleak can't see, tell
+	 * it that this object explicitly is not a leak and
+	 * should be scanned.
+	 */
+	kmemleak_not_leak(bucket);
+
 	__flush_dcache_range((unsigned long) bucket,
 			     ((unsigned long) bucket +
 			      sizeof(struct ino_bucket)));
@@ -721,7 +730,7 @@ static __attribute__((always_inline)) vo
 	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
 }
 
-void handler_irq(int irq, struct pt_regs *regs)
+void __irq_entry handler_irq(int irq, struct pt_regs *regs)
 {
 	unsigned long pstate, bucket_pa;
 	struct pt_regs *old_regs;
diff -urpNa -X dontdiff linux-2.6.34-rc4/arch/sparc/kernel/kgdb_64.c linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/kgdb_64.c
--- linux-2.6.34-rc4/arch/sparc/kernel/kgdb_64.c	2010-02-24 10:52:17.000000000 -0800
+++ linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/kgdb_64.c	2010-04-19 15:01:10.000000000 -0700
@@ -5,6 +5,7 @@
 #include
 #include
+#include <linux/ftrace.h>
 
 #include
 #include
@@ -108,7 +109,7 @@ void gdb_regs_to_pt_regs(unsigned long *
 }
 
 #ifdef CONFIG_SMP
-void smp_kgdb_capture_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs)
 {
 	unsigned long flags;
diff -urpNa -X dontdiff linux-2.6.34-rc4/arch/sparc/kernel/Makefile linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/Makefile
--- linux-2.6.34-rc4/arch/sparc/kernel/Makefile	2010-02-24 10:52:17.000000000 -0800
+++ linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/Makefile	2010-04-19 15:01:10.000000000 -0700
@@ -13,6 +13,14 @@ extra-y     += init_task.o
 CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS)
 extra-y              += vmlinux.lds
 
+ifdef CONFIG_FUNCTION_TRACER
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_ftrace.o := -pg
+CFLAGS_REMOVE_time_$(BITS).o := -pg
+CFLAGS_REMOVE_perf_event.o := -pg
+CFLAGS_REMOVE_pcr.o := -pg
+endif
+
 obj-$(CONFIG_SPARC32)   += entry.o wof.o wuf.o
 obj-$(CONFIG_SPARC32)   += etrap_32.o
 obj-$(CONFIG_SPARC32)   += rtrap_32.o
@@ -85,7 +93,7 @@ obj-$(CONFIG_KGDB)        += kgdb_$(BITS
 
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-CFLAGS_REMOVE_ftrace.o := -pg
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 
 obj-$(CONFIG_EARLYFB) += btext.o
 obj-$(CONFIG_STACKTRACE)     += stacktrace.o
diff -urpNa -X dontdiff linux-2.6.34-rc4/arch/sparc/kernel/nmi.c linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/nmi.c
--- linux-2.6.34-rc4/arch/sparc/kernel/nmi.c	2010-04-19 14:45:09.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/nmi.c	2010-04-19 15:01:10.000000000 -0700
@@ -92,7 +92,6 @@ static void die_nmi(const char *str, str
 notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 {
 	unsigned int sum, touched = 0;
-	int cpu = smp_processor_id();
 
 	clear_softint(1 << irq);
 
@@ -106,7 +105,7 @@ notrace __kprobes void perfctr_irq(int i
 	else
 		pcr_ops->write(PCR_PIC_PRIV);
 
-	sum = kstat_irqs_cpu(0, cpu);
+	sum = local_cpu_data().irq0_irqs;
 	if (__get_cpu_var(nmi_touch)) {
 		__get_cpu_var(nmi_touch) = 0;
 		touched = 1;
diff -urpNa -X dontdiff linux-2.6.34-rc4/arch/sparc/kernel/pci_common.c linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/pci_common.c
--- linux-2.6.34-rc4/arch/sparc/kernel/pci_common.c	2010-02-24 10:52:17.000000000 -0800
+++ linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/pci_common.c	2010-04-19 15:01:10.000000000 -0700
@@ -371,14 +371,19 @@ static void pci_register_iommu_region(st
 		struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL);
 
 		if (!rp) {
-			prom_printf("Cannot allocate IOMMU resource.\n");
-			prom_halt();
+			pr_info("%s: Cannot allocate IOMMU resource.\n",
+				pbm->name);
+			return;
 		}
 		rp->name = "IOMMU";
 		rp->start = pbm->mem_space.start + (unsigned long) vdma[0];
 		rp->end = rp->start + (unsigned long) vdma[1] - 1UL;
 		rp->flags = IORESOURCE_BUSY;
-		request_resource(&pbm->mem_space, rp);
+		if (request_resource(&pbm->mem_space, rp)) {
+			pr_info("%s: Unable to request IOMMU resource.\n",
+				pbm->name);
+			kfree(rp);
+		}
 	}
 }
diff -urpNa -X dontdiff linux-2.6.34-rc4/arch/sparc/kernel/pcr.c linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/pcr.c
--- linux-2.6.34-rc4/arch/sparc/kernel/pcr.c	2010-02-24 10:52:17.000000000 -0800
+++ linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/pcr.c	2010-04-19 15:01:10.000000000 -0700
@@ -8,6 +8,7 @@
 #include
 #include
+#include <linux/ftrace.h>
 
 #include
 #include
@@ -34,7 +35,7 @@ unsigned int picl_shift;
  * Therefore in such situations we defer the work by signalling
  * a lower level cpu IRQ.
  */
-void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
+void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
 {
 	struct pt_regs *old_regs;
diff -urpNa -X dontdiff linux-2.6.34-rc4/arch/sparc/kernel/smp_64.c linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/smp_64.c
--- linux-2.6.34-rc4/arch/sparc/kernel/smp_64.c	2010-04-19 14:45:09.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/smp_64.c	2010-04-19 15:01:10.000000000 -0700
@@ -22,6 +22,7 @@
 #include
 #include
 #include
+#include <linux/ftrace.h>
 
 #include
 #include
@@ -823,13 +824,13 @@ void arch_send_call_function_single_ipi(
 			      &cpumask_of_cpu(cpu));
 }
 
-void smp_call_function_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
 	generic_smp_call_function_interrupt();
 }
 
-void smp_call_function_single_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
 	generic_smp_call_function_single_interrupt();
@@ -965,7 +966,7 @@ void flush_dcache_page_all(struct mm_str
 	put_cpu();
 }
 
-void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
 {
 	struct mm_struct *mm;
 	unsigned long flags;
@@ -1149,7 +1150,7 @@ void smp_release(void)
  */
 extern void prom_world(int);
 
-void smp_penguin_jailcell(int irq, struct pt_regs *regs)
+void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
 
@@ -1365,7 +1366,7 @@ void smp_send_reschedule(int cpu)
 		      &cpumask_of_cpu(cpu));
 }
 
-void smp_receive_signal_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
 }
diff -urpNa -X dontdiff linux-2.6.34-rc4/arch/sparc/kernel/time_64.c linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/time_64.c
--- linux-2.6.34-rc4/arch/sparc/kernel/time_64.c	2010-02-24 10:52:17.000000000 -0800
+++ linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/time_64.c	2010-04-19 15:01:10.000000000 -0700
@@ -35,6 +35,7 @@
 #include
 #include
 #include
+#include <linux/ftrace.h>
 
 #include
 #include
@@ -717,7 +718,7 @@ static struct clock_event_device sparc64
 };
 static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
 
-void timer_interrupt(int irq, struct pt_regs *regs)
+void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	unsigned long tick_mask = tick_ops->softint_mask;
@@ -728,6 +729,7 @@ void timer_interrupt(int irq, struct pt_
 
 	irq_enter();
 
+	local_cpu_data().irq0_irqs++;
 	kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
 
 	if (unlikely(!evt->event_handler)) {
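The nmi.c and time_64.c hunks above cooperate: timer_interrupt() bumps a cheap
per-cpu counter, and the NMI watchdog treats a CPU as healthy only if that
counter has advanced since the previous NMI tick.  A stripped-down user-space
model of that heartbeat check follows; heartbeat, timer_tick(), watchdog_tick()
and the alert threshold are illustrative stand-ins for
local_cpu_data().irq0_irqs, timer_interrupt(), perfctr_irq() and the kernel's
real limit, not kernel APIs.

	#include <stdio.h>

	static unsigned long heartbeat;		/* local_cpu_data().irq0_irqs */
	static unsigned long last_seen;
	static int alert_counter;

	static void timer_tick(void)		/* models timer_interrupt() */
	{
		heartbeat++;
	}

	static void watchdog_tick(void)		/* models perfctr_irq(), NMI context */
	{
		if (heartbeat == last_seen) {
			/* No timer interrupts since the last NMI tick. */
			if (++alert_counter == 5)
				printf("watchdog: CPU appears locked up\n");
		} else {
			last_seen = heartbeat;
			alert_counter = 0;
		}
	}

	int main(void)
	{
		timer_tick();
		watchdog_tick();	/* progress observed: counter reset */
		watchdog_tick();	/* no progress: alert count rises */
		return 0;
	}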
diff -urpNa -X dontdiff linux-2.6.34-rc4/arch/sparc/kernel/traps_64.c linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/traps_64.c
--- linux-2.6.34-rc4/arch/sparc/kernel/traps_64.c	2010-04-19 14:45:09.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/traps_64.c	2010-04-19 15:01:10.000000000 -0700
@@ -2203,27 +2203,6 @@ void dump_stack(void)
 }
 EXPORT_SYMBOL(dump_stack);
 
-static inline int is_kernel_stack(struct task_struct *task,
-				  struct reg_window *rw)
-{
-	unsigned long rw_addr = (unsigned long) rw;
-	unsigned long thread_base, thread_end;
-
-	if (rw_addr < PAGE_OFFSET) {
-		if (task != &init_task)
-			return 0;
-	}
-
-	thread_base = (unsigned long) task_stack_page(task);
-	thread_end = thread_base + sizeof(union thread_union);
-	if (rw_addr >= thread_base &&
-	    rw_addr < thread_end &&
-	    !(rw_addr & 0x7UL))
-		return 1;
-
-	return 0;
-}
-
 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
 {
 	unsigned long fp = rw->ins[6];
@@ -2252,6 +2231,7 @@ void die_if_kernel(char *str, struct pt_
 	show_regs(regs);
 	add_taint(TAINT_DIE);
 	if (regs->tstate & TSTATE_PRIV) {
+		struct thread_info *tp = current_thread_info();
 		struct reg_window *rw = (struct reg_window *)
 			(regs->u_regs[UREG_FP] + STACK_BIAS);
 
@@ -2259,8 +2239,8 @@ void die_if_kernel(char *str, struct pt_
 		 * find some badly aligned kernel stack.
 		 */
 		while (rw &&
-		       count++ < 30&&
-		       is_kernel_stack(current, rw)) {
+		       count++ < 30 &&
+		       kstack_valid(tp, (unsigned long) rw)) {
 			printk("Caller[%016lx]: %pS\n", rw->ins[7],
 			       (void *) rw->ins[7]);
diff -urpNa -X dontdiff linux-2.6.34-rc4/arch/sparc/kernel/vmlinux.lds.S linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/vmlinux.lds.S
--- linux-2.6.34-rc4/arch/sparc/kernel/vmlinux.lds.S	2010-02-24 10:52:17.000000000 -0800
+++ linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/kernel/vmlinux.lds.S	2010-04-19 15:01:10.000000000 -0700
@@ -46,11 +46,16 @@ SECTIONS
 		SCHED_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
+		IRQENTRY_TEXT
 		*(.gnu.warning)
 	} = 0
 	_etext = .;
 
 	RO_DATA(PAGE_SIZE)
+
+	/* Start of data section */
+	_sdata = .;
+
 	.data1 : {
 		*(.data1)
 	}
diff -urpNa -X dontdiff linux-2.6.34-rc4/arch/sparc/lib/mcount.S linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/lib/mcount.S
--- linux-2.6.34-rc4/arch/sparc/lib/mcount.S	2010-02-24 10:52:17.000000000 -0800
+++ linux-2.6.34-rc4-bc293d62-rcu/arch/sparc/lib/mcount.S	2010-04-19 15:01:10.000000000 -0700
@@ -7,26 +7,11 @@
 
 #include <linux/linkage.h>
 
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-
 /*
  * This is the main variant and is called by C code.  GCC's -pg option
  * automatically instruments every C function with a call to this.
  */
 
-#ifdef CONFIG_STACK_DEBUG
-
-#define OVSTACKSIZE	4096		/* lets hope this is enough */
-
-	.data
-	.align		8
-panicstring:
-	.asciz		"Stack overflow\n"
-	.align		8
-ovstack:
-	.skip		OVSTACKSIZE
-#endif
 	.text
 	.align		32
 	.globl		_mcount
@@ -35,84 +20,48 @@ ovstack:
 	.type		mcount,#function
 _mcount:
 mcount:
-#ifdef CONFIG_STACK_DEBUG
-	/*
-	 * Check whether %sp is dangerously low.
-	 */
-	ldub		[%g6 + TI_FPDEPTH], %g1
-	srl		%g1, 1, %g3
-	add		%g3, 1, %g3
-	sllx		%g3, 8, %g3		! each fpregs frame is 256b
-	add		%g3, 192, %g3
-	add		%g6, %g3, %g3		! where does task_struct+frame end?
-	sub		%g3, STACK_BIAS, %g3
-	cmp		%sp, %g3
-	bg,pt		%xcc, 1f
-	 nop
-	lduh		[%g6 + TI_CPU], %g1
-	sethi		%hi(hardirq_stack), %g3
-	or		%g3, %lo(hardirq_stack), %g3
-	sllx		%g1, 3, %g1
-	ldx		[%g3 + %g1], %g7
-	sub		%g7, STACK_BIAS, %g7
-	cmp		%sp, %g7
-	bleu,pt		%xcc, 2f
-	 sethi		%hi(THREAD_SIZE), %g3
-	add		%g7, %g3, %g7
-	cmp		%sp, %g7
-	blu,pn		%xcc, 1f
-2:	 sethi		%hi(softirq_stack), %g3
-	or		%g3, %lo(softirq_stack), %g3
-	ldx		[%g3 + %g1], %g7
-	sub		%g7, STACK_BIAS, %g7
-	cmp		%sp, %g7
-	bleu,pt		%xcc, 3f
-	 sethi		%hi(THREAD_SIZE), %g3
-	add		%g7, %g3, %g7
-	cmp		%sp, %g7
-	blu,pn		%xcc, 1f
-	 nop
-	/* If we are already on ovstack, don't hop onto it
-	 * again, we are already trying to output the stack overflow
-	 * message.
-	 */
-3:	sethi		%hi(ovstack), %g7	! cant move to panic stack fast enough
-	 or		%g7, %lo(ovstack), %g7
-	add		%g7, OVSTACKSIZE, %g3
-	sub		%g3, STACK_BIAS + 192, %g3
-	sub		%g7, STACK_BIAS, %g7
-	cmp		%sp, %g7
-	blu,pn		%xcc, 2f
-	 cmp		%sp, %g3
-	bleu,pn		%xcc, 1f
-	 nop
-2:	mov		%g3, %sp
-	sethi		%hi(panicstring), %g3
-	call		prom_printf
-	 or		%g3, %lo(panicstring), %o0
-	call		prom_halt
-	 nop
-1:
-#endif
 #ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
-	mov		%o7, %o0
-	.globl		mcount_call
-mcount_call:
-	call		ftrace_stub
-	 mov		%o0, %o7
+	/* Do nothing, the retl/nop below is all we need. */
 #else
-	sethi		%hi(ftrace_trace_function), %g1
+	sethi		%hi(function_trace_stop), %g1
+	lduw		[%g1 + %lo(function_trace_stop)], %g2
+	brnz,pn		%g2, 2f
+	 sethi		%hi(ftrace_trace_function), %g1
 	sethi		%hi(ftrace_stub), %g2
 	ldx		[%g1 + %lo(ftrace_trace_function)], %g1
 	or		%g2, %lo(ftrace_stub), %g2
 	cmp		%g1, %g2
 	be,pn		%icc, 1f
-	 mov		%i7, %o1
-	jmpl		%g1, %g0
-	 mov		%o7, %o0
+	 mov		%i7, %g3
+	save		%sp, -128, %sp
+	mov		%g3, %o1
+	jmpl		%g1, %o7
+	 mov		%i7, %o0
+	ret
+	 restore
+	/* not reached */
 1:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	sethi		%hi(ftrace_graph_return), %g1
+	ldx		[%g1 + %lo(ftrace_graph_return)], %g3
+	cmp		%g2, %g3
+	bne,pn		%xcc, 5f
+	 sethi		%hi(ftrace_graph_entry_stub), %g2
+	sethi		%hi(ftrace_graph_entry), %g1
+	or		%g2, %lo(ftrace_graph_entry_stub), %g2
+	ldx		[%g1 + %lo(ftrace_graph_entry)], %g1
+	cmp		%g1, %g2
+	be,pt		%xcc, 2f
+	 nop
+5:	mov		%i7, %g2
+	mov		%fp, %g3
+	save		%sp, -128, %sp
+	mov		%g2, %l0
+	ba,pt		%xcc, ftrace_graph_caller
+	 mov		%g3, %l1
+#endif
+2:
 #endif
 #endif
 	retl
@@ -131,14 +80,50 @@ ftrace_stub:
 	.globl		ftrace_caller
 	.type		ftrace_caller,#function
 ftrace_caller:
-	mov		%i7, %o1
-	mov		%o7, %o0
+	sethi		%hi(function_trace_stop), %g1
+	mov		%i7, %g2
+	lduw		[%g1 + %lo(function_trace_stop)], %g1
+	brnz,pn		%g1, ftrace_stub
+	 mov		%fp, %g3
+	save		%sp, -128, %sp
+	mov		%g2, %o1
+	mov		%g2, %l0
+	mov		%g3, %l1
 	.globl		ftrace_call
 ftrace_call:
 	call		ftrace_stub
-	 mov		%o0, %o7
-	retl
+	 mov		%i7, %o0
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl		ftrace_graph_call
+ftrace_graph_call:
+	call		ftrace_stub
 	 nop
+#endif
+	ret
+	 restore
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.size		ftrace_graph_call,.-ftrace_graph_call
+#endif
+	.size		ftrace_call,.-ftrace_call
 	.size		ftrace_caller,.-ftrace_caller
 #endif
 #endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+	mov		%l0, %o0
+	mov		%i7, %o1
+	call		prepare_ftrace_return
+	 mov		%l1, %o2
+	ret
+	 restore	%o0, -8, %i7
+END(ftrace_graph_caller)
+
+ENTRY(return_to_handler)
+	save		%sp, -128, %sp
+	call		ftrace_return_to_handler
+	 mov		%fp, %o0
+	jmpl		%o0 + 8, %g0
+	 restore
+END(return_to_handler)
+#endif
diff -urpNa -X dontdiff linux-2.6.34-rc4/block/cfq-iosched.c linux-2.6.34-rc4-bc293d62-rcu/block/cfq-iosched.c
--- linux-2.6.34-rc4/block/cfq-iosched.c	2010-04-19 14:45:12.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/block/cfq-iosched.c	2010-04-19 16:27:46.000000000 -0700
@@ -3741,7 +3741,6 @@ static void *cfq_init_queue(struct reque
 	 * second, in order to have larger depth for async operations.
 	 */
 	cfqd->last_delayed_sync = jiffies - HZ;
-	INIT_RCU_HEAD(&cfqd->rcu);
 	return cfqd;
 }
diff -urpNa -X dontdiff linux-2.6.34-rc4/block/genhd.c linux-2.6.34-rc4-bc293d62-rcu/block/genhd.c
--- linux-2.6.34-rc4/block/genhd.c	2010-02-24 10:52:17.000000000 -0800
+++ linux-2.6.34-rc4-bc293d62-rcu/block/genhd.c	2010-04-19 16:27:46.000000000 -0700
@@ -987,7 +987,6 @@ int disk_expand_part_tbl(struct gendisk
 	if (!new_ptbl)
 		return -ENOMEM;
 
-	INIT_RCU_HEAD(&new_ptbl->rcu_head);
 	new_ptbl->len = target;
 
 	for (i = 0; i < len; i++)
diff -urpNa -X dontdiff linux-2.6.34-rc4/Documentation/networking/timestamping.txt linux-2.6.34-rc4-bc293d62-rcu/Documentation/networking/timestamping.txt
--- linux-2.6.34-rc4/Documentation/networking/timestamping.txt	2010-02-24 10:52:17.000000000 -0800
+++ linux-2.6.34-rc4-bc293d62-rcu/Documentation/networking/timestamping.txt	2010-04-19 15:01:10.000000000 -0700
@@ -41,11 +41,12 @@ SOF_TIMESTAMPING_SOFTWARE:     return sy
 SOF_TIMESTAMPING_TX/RX determine how time stamps are generated.
 SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the
 following control message:
-    struct scm_timestamping {
-	    struct timespec systime;
-	    struct timespec hwtimetrans;
-	    struct timespec hwtimeraw;
-    };
+
+struct scm_timestamping {
+	struct timespec systime;
+	struct timespec hwtimetrans;
+	struct timespec hwtimeraw;
+};
 
 recvmsg() can be used to get this control message for regular incoming
 packets. For send time stamps the outgoing packet is looped back to
@@ -87,12 +88,13 @@ by the network device and will be empty
 SIOCSHWTSTAMP:
 
 Hardware time stamping must also be initialized for each device driver
-that is expected to do hardware time stamping. The parameter is:
+that is expected to do hardware time stamping. The parameter is defined in
+/include/linux/net_tstamp.h as:
 
 struct hwtstamp_config {
-	int flags;	/* no flags defined right now, must be zero */
-	int tx_type;	/* HWTSTAMP_TX_* */
-	int rx_filter;	/* HWTSTAMP_FILTER_* */
+	int flags;		/* no flags defined right now, must be zero */
+	int tx_type;		/* HWTSTAMP_TX_* */
+	int rx_filter;		/* HWTSTAMP_FILTER_* */
 };
 
 Desired behavior is passed into the kernel and to a specific device by
@@ -139,42 +141,56 @@ enum {
 	/* time stamp any incoming packet */
 	HWTSTAMP_FILTER_ALL,
 
-	/* return value: time stamp all packets requested plus some others */
-	HWTSTAMP_FILTER_SOME,
+	/* return value: time stamp all packets requested plus some others */
+	HWTSTAMP_FILTER_SOME,
 
 	/* PTP v1, UDP, any kind of event packet */
 	HWTSTAMP_FILTER_PTP_V1_L4_EVENT,
 
-	...
+	/* for the complete list of values, please check
+	 * the include file /include/linux/net_tstamp.h
+	 */
 };
 
 DEVICE IMPLEMENTATION
 
 A driver which supports hardware time stamping must support the
-SIOCSHWTSTAMP ioctl. Time stamps for received packets must be stored
-in the skb with skb_hwtstamp_set().
+SIOCSHWTSTAMP ioctl and update the supplied struct hwtstamp_config with
+the actual values as described in the section on SIOCSHWTSTAMP.
+
+Time stamps for received packets must be stored in the skb. To get a pointer
+to the shared time stamp structure of the skb call skb_hwtstamps(). Then
+set the time stamps in the structure:
+
+struct skb_shared_hwtstamps {
+	/* hardware time stamp transformed into duration
+	 * since arbitrary point in time
+	 */
+	ktime_t	hwtstamp;
+	ktime_t	syststamp; /* hwtstamp transformed to system time base */
+};
 
 Time stamps for outgoing packets are to be generated as follows:
-- In hard_start_xmit(), check if skb_hwtstamp_check_tx_hardware()
-  returns non-zero. If yes, then the driver is expected
-  to do hardware time stamping.
+- In hard_start_xmit(), check if skb_tx(skb)->hardware is set non-zero.
+  If yes, then the driver is expected to do hardware time stamping.
 - If this is possible for the skb and requested, then declare
-  that the driver is doing the time stamping by calling
-  skb_hwtstamp_tx_in_progress(). A driver not supporting
-  hardware time stamping doesn't do that. A driver must never
-  touch sk_buff::tstamp! It is used to store how time stamping
-  for an outgoing packets is to be done.
+  that the driver is doing the time stamping by setting the field
+  skb_tx(skb)->in_progress non-zero. You might want to keep a pointer
+  to the associated skb for the next step and not free the skb. A driver
+  not supporting hardware time stamping doesn't do that. A driver must
+  never touch sk_buff::tstamp! It is used to store software generated
+  time stamps by the network subsystem.
 - As soon as the driver has sent the packet and/or obtained a
   hardware time stamp for it, it passes the time stamp back by
   calling skb_hwtstamp_tx() with the original skb, the raw
-  hardware time stamp and a handle to the device (necessary
-  to convert the hardware time stamp to system time). If obtaining
-  the hardware time stamp somehow fails, then the driver should
-  not fall back to software time stamping. The rationale is that
-  this would occur at a later time in the processing pipeline
-  than other software time stamping and therefore could lead
-  to unexpected deltas between time stamps.
-- If the driver did not call skb_hwtstamp_tx_in_progress(), then
+  hardware time stamp. skb_hwtstamp_tx() clones the original skb and
+  adds the timestamps, therefore the original skb has to be freed now.
+  If obtaining the hardware time stamp somehow fails, then the driver
+  should not fall back to software time stamping. The rationale is that
+  this would occur at a later time in the processing pipeline than other
+  software time stamping and therefore could lead to unexpected deltas
+  between time stamps.
-- If the driver did not set skb_tx(skb)->in_progress, then
+  dev_hard_start_xmit() checks whether software time stamping
+  is wanted as fallback and potentially generates the time stamp.
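A rough sketch of the transmit-side sequence the text above prescribes, as it
might look inside a driver (my_priv, my_start_xmit() and my_hw_read_tstamp()
are hypothetical driver details, not kernel APIs; skb_tx(), skb_hwtstamp_tx()
and struct skb_shared_hwtstamps are the interfaces named in the document):

	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
					 struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);

		if (skb_tx(skb)->hardware) {
			skb_tx(skb)->in_progress = 1;	/* we will stamp it */
			priv->tstamp_skb = skb;		/* keep it for later */
		}
		/* ... hand the packet to the hardware ... */
		return NETDEV_TX_OK;
	}

	/* Called later, e.g. from the TX-complete interrupt: */
	static void my_tx_tstamp_done(struct my_priv *priv)
	{
		struct skb_shared_hwtstamps hwts = {
			.hwtstamp = my_hw_read_tstamp(priv),
		};

		/* clones the skb and attaches the stamp... */
		skb_hwtstamp_tx(priv->tstamp_skb, &hwts);
		/* ...so the original must be freed now, per the text above */
		dev_kfree_skb(priv->tstamp_skb);
		priv->tstamp_skb = NULL;
	}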
diff -urpNa -X dontdiff linux-2.6.34-rc4/Documentation/RCU/checklist.txt linux-2.6.34-rc4-bc293d62-rcu/Documentation/RCU/checklist.txt
--- linux-2.6.34-rc4/Documentation/RCU/checklist.txt	2010-04-19 14:44:58.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/Documentation/RCU/checklist.txt	2010-04-19 15:01:10.000000000 -0700
@@ -260,7 +260,8 @@ over a rather long period of time, but i
 	The reason that it is permissible to use RCU list-traversal
 	primitives when the update-side lock is held is that doing so
 	can be quite helpful in reducing code bloat when common code is
-	shared between readers and updaters.
+	shared between readers and updaters.  Additional primitives
+	are provided for this case, as discussed in lockdep.txt.
 
 10.	Conversely, if you are in an RCU read-side critical section,
 	and you don't hold the appropriate update-side lock, you -must-
@@ -344,8 +345,8 @@ over a rather long period of time, but i
 	requiring SRCU's read-side deadlock immunity or low read-side
 	realtime latency.
 
-	Note that, rcu_assign_pointer() and rcu_dereference() relate to
-	SRCU just as they do to other forms of RCU.
+	Note that rcu_assign_pointer() relates to SRCU just as it does
+	to other forms of RCU.
 
 15.	The whole point of call_rcu(), synchronize_rcu(), and friends
 	is to wait until all pre-existing readers have finished before
diff -urpNa -X dontdiff linux-2.6.34-rc4/Documentation/RCU/lockdep.txt linux-2.6.34-rc4-bc293d62-rcu/Documentation/RCU/lockdep.txt
--- linux-2.6.34-rc4/Documentation/RCU/lockdep.txt	2010-04-19 14:44:58.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/Documentation/RCU/lockdep.txt	2010-04-19 15:01:10.000000000 -0700
@@ -32,9 +32,20 @@ checking of rcu_dereference() primitives
 	srcu_dereference(p, sp):
 		Check for SRCU read-side critical section.
 	rcu_dereference_check(p, c):
-		Use explicit check expression "c".
+		Use explicit check expression "c".  This is useful in
+		code that is invoked by both readers and updaters.
 	rcu_dereference_raw(p)
 		Don't check.  (Use sparingly, if at all.)
+	rcu_dereference_protected(p, c):
+		Use explicit check expression "c", and omit all barriers
+		and compiler constraints.  This is useful when the data
+		structure cannot change, for example, in code that is
+		invoked only by updaters.
+	rcu_access_pointer(p):
+		Return the value of the pointer and omit all barriers,
+		but retain the compiler constraints that prevent duplicating
+		or coalescing.  This is useful when testing the
+		value of the pointer itself, for example, against NULL.
 
 The rcu_dereference_check() check expression can be any boolean
 expression, but would normally include one of the rcu_read_lock_held()
@@ -59,7 +70,20 @@ In case (1), the pointer is picked up in
 RCU read-side critical sections, in case (2) the ->file_lock prevents
 any change from taking place, and finally, in case (3) the current task
 is the only task accessing the file_struct, again preventing any change
-from taking place.
+from taking place.  If the above statement was invoked only from updater
+code, it could instead be written as follows:
+
+	file = rcu_dereference_protected(fdt->fd[fd],
+					 lockdep_is_held(&files->file_lock) ||
+					 atomic_read(&files->count) == 1);
+
+This would verify cases #2 and #3 above, and furthermore lockdep would
+complain if this was used in an RCU read-side critical section unless one
+of these two cases held.  Because rcu_dereference_protected() omits all
+barriers and compiler constraints, it generates better code than do the
+other flavors of rcu_dereference().  On the other hand, it is illegal
+to use rcu_dereference_protected() if either the RCU-protected pointer
+or the RCU-protected data that it points to can change concurrently.
 
 There are currently only "universal" versions of the rcu_assign_pointer()
 and RCU list-/tree-traversal primitives, which do not (yet) check for
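A small illustrative fragment tying the accessors documented above together
(struct foo, gp, update_foo() and my_lock are made-up names; the primitives
are the ones the lockdep.txt text describes):

	struct foo {
		int a;
	};

	static struct foo *gp;			/* RCU-protected pointer */
	static DEFINE_SPINLOCK(my_lock);

	static void update_foo(struct foo *newp)
	{
		struct foo *oldp;

		spin_lock(&my_lock);
		/* Update side: my_lock is held, so no barriers are needed. */
		oldp = rcu_dereference_protected(gp,
						 lockdep_is_held(&my_lock));
		rcu_assign_pointer(gp, newp);
		spin_unlock(&my_lock);

		if (oldp) {
			synchronize_rcu();
			kfree(oldp);
		}
	}

	static bool foo_is_published(void)
	{
		/* Only the pointer value is tested, nothing is dereferenced. */
		return rcu_access_pointer(gp) != NULL;
	}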
diff -urpNa -X dontdiff linux-2.6.34-rc4/Documentation/RCU/NMI-RCU.txt linux-2.6.34-rc4-bc293d62-rcu/Documentation/RCU/NMI-RCU.txt
--- linux-2.6.34-rc4/Documentation/RCU/NMI-RCU.txt	2010-02-24 10:52:17.000000000 -0800
+++ linux-2.6.34-rc4-bc293d62-rcu/Documentation/RCU/NMI-RCU.txt	2010-04-19 15:01:10.000000000 -0700
@@ -34,7 +34,7 @@ NMI handler.
 		cpu = smp_processor_id();
 		++nmi_count(cpu);
 
-		if (!rcu_dereference(nmi_callback)(regs, cpu))
+		if (!rcu_dereference_sched(nmi_callback)(regs, cpu))
 			default_do_nmi(regs);
 
 		nmi_exit();
@@ -47,12 +47,13 @@ function pointer.  If this handler retur
 default_do_nmi() function to handle a machine-specific NMI.  Finally,
 preemption is restored.
 
-Strictly speaking, rcu_dereference() is not needed, since this code runs
-only on i386, which does not need rcu_dereference() anyway.  However,
-it is a good documentation aid, particularly for anyone attempting to
-do something similar on Alpha.
+In theory, rcu_dereference_sched() is not needed, since this code runs
+only on i386, which in theory does not need rcu_dereference_sched()
+anyway.  However, in practice it is a good documentation aid, particularly
+for anyone attempting to do something similar on Alpha or on systems
+with aggressive optimizing compilers.
 
-Quick Quiz:  Why might the rcu_dereference() be necessary on Alpha,
+Quick Quiz:  Why might the rcu_dereference_sched() be necessary on Alpha,
 	     given that the code referenced by the pointer is read-only?
 
 
@@ -99,17 +100,21 @@ invoke irq_enter() and irq_exit() on NMI
 
 Answer to Quick Quiz
 
-	Why might the rcu_dereference() be necessary on Alpha, given
+	Why might the rcu_dereference_sched() be necessary on Alpha, given
 	that the code referenced by the pointer is read-only?
 
 	Answer: The caller to set_nmi_callback() might well have
-		initialized some data that is to be used by the
-		new NMI handler.  In this case, the rcu_dereference()
-		would be needed, because otherwise a CPU that received
-		an NMI just after the new handler was set might see
-		the pointer to the new NMI handler, but the old
-		pre-initialized version of the handler's data.
-
-		More important, the rcu_dereference() makes it clear
-		to someone reading the code that the pointer is being
-		protected by RCU.
+		initialized some data that is to be used by the new NMI
+		handler.  In this case, the rcu_dereference_sched() would
+		be needed, because otherwise a CPU that received an NMI
+		just after the new handler was set might see the pointer
+		to the new NMI handler, but the old pre-initialized
+		version of the handler's data.
+
+		This same sad story can happen on other CPUs when using
+		a compiler with aggressive pointer-value speculation
+		optimizations.
+
+		More important, the rcu_dereference_sched() makes it
+		clear to someone reading the code that the pointer is
+		being protected by RCU-sched.
diff -urpNa -X dontdiff linux-2.6.34-rc4/Documentation/RCU/stallwarn.txt linux-2.6.34-rc4-bc293d62-rcu/Documentation/RCU/stallwarn.txt
--- linux-2.6.34-rc4/Documentation/RCU/stallwarn.txt	2010-04-19 14:44:58.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/Documentation/RCU/stallwarn.txt	2010-04-19 16:27:46.000000000 -0700
@@ -3,35 +3,79 @@ Using RCU's CPU Stall Detector
 
 The CONFIG_RCU_CPU_STALL_DETECTOR kernel config parameter enables
 RCU's CPU stall detector, which detects conditions that unduly delay
 RCU grace periods.  The stall detector's idea of what constitutes
-"unduly delayed" is controlled by a pair of C preprocessor macros:
+"unduly delayed" is controlled by a set of C preprocessor macros:
 
 RCU_SECONDS_TILL_STALL_CHECK
 
 	This macro defines the period of time that RCU will wait from
 	the beginning of a grace period until it issues an RCU CPU
-	stall warning.  It is normally ten seconds.
+	stall warning.  This time period is normally ten seconds.
 
 RCU_SECONDS_TILL_STALL_RECHECK
 
 	This macro defines the period of time that RCU will wait after
-	issuing a stall warning until it issues another stall warning.
-	It is normally set to thirty seconds.
+	issuing a stall warning until it issues another stall warning
+	for the same stall.  This time period is normally set to thirty
+	seconds.
 
 RCU_STALL_RAT_DELAY
 
-	The CPU stall detector tries to make the offending CPU rat on itself,
-	as this often gives better-quality stack traces.  However, if
-	the offending CPU does not detect its own stall in the number
-	of jiffies specified by RCU_STALL_RAT_DELAY, then other CPUs will
-	complain.  This is normally set to two jiffies.
-
-The following problems can result in an RCU CPU stall warning:
+	The CPU stall detector tries to make the offending CPU print its
+	own warnings, as this often gives better-quality stack traces.
+	However, if the offending CPU does not detect its own stall in
+	the number of jiffies specified by RCU_STALL_RAT_DELAY, then
+	some other CPU will complain.  This delay is normally set to
+	two jiffies.
+
+When a CPU detects that it is stalling, it will print a message similar
+to the following:
+
+INFO: rcu_sched_state detected stall on CPU 5 (t=2500 jiffies)
+
+This message indicates that CPU 5 detected that it was causing a stall,
+and that the stall was affecting RCU-sched.  This message will normally be
+followed by a stack dump of the offending CPU.  On TREE_RCU kernel builds,
+RCU and RCU-sched are implemented by the same underlying mechanism,
+while on TREE_PREEMPT_RCU kernel builds, RCU is instead implemented
+by rcu_preempt_state.
+
+On the other hand, if the offending CPU fails to print out a stall-warning
+message quickly enough, some other CPU will print a message similar to
+the following:
+
+INFO: rcu_bh_state detected stalls on CPUs/tasks: { 3 5 } (detected by 2, 2502 jiffies)
+
+This message indicates that CPU 2 detected that CPUs 3 and 5 were both
+causing stalls, and that the stall was affecting RCU-bh.  This message
+will normally be followed by stack dumps for each CPU.  Please note that
+TREE_PREEMPT_RCU builds can be stalled by tasks as well as by CPUs,
+and that the tasks will be indicated by PID, for example, "P3421".
+It is even possible for an rcu_preempt_state stall to be caused by both
+CPUs -and- tasks, in which case the offending CPUs and tasks will all
+be called out in the list.
+
+Finally, if the grace period ends just as the stall warning starts
+printing, there will be a spurious stall-warning message:
+
+INFO: rcu_bh_state detected stalls on CPUs/tasks: { } (detected by 4, 2502 jiffies)
+
+This is rare, but does happen from time to time in real life.
+
+So your kernel printed an RCU CPU stall warning.  The next question is
+"What caused it?"  The following problems can result in RCU CPU stall
+warnings:
 
 o	A CPU looping in an RCU read-side critical section.
 
-o	A CPU looping with interrupts disabled.
+o	A CPU looping with interrupts disabled.  This condition can
+	result in RCU-sched and RCU-bh stalls.
+
+o	A CPU looping with preemption disabled.  This condition can
+	result in RCU-sched stalls and, if ksoftirqd is in use, RCU-bh
+	stalls.
 
-o	A CPU looping with preemption disabled.
+o	A CPU looping with bottom halves disabled.  This condition can
+	result in RCU-sched and RCU-bh stalls.
 
 o	For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
 	without invoking schedule().
@@ -39,20 +83,24 @@ o	For !CONFIG_PREEMPT kernels, a CPU loo
 o	A bug in the RCU implementation.
 
 o	A hardware failure.  This is quite unlikely, but has occurred
-	at least once in a former life.  A CPU failed in a running system,
+	at least once in real life.  A CPU failed in a running system,
 	becoming unresponsive, but not causing an immediate crash.
 	This resulted in a series of RCU CPU stall warnings, eventually
 	leading the realization that the CPU had failed.
 
-The RCU, RCU-sched, and RCU-bh implementations have CPU stall warning.
-SRCU does not do so directly, but its calls to synchronize_sched() will
-result in RCU-sched detecting any CPU stalls that might be occurring.
-
-To diagnose the cause of the stall, inspect the stack traces.  The offending
-function will usually be near the top of the stack.  If you have a series
-of stall warnings from a single extended stall, comparing the stack traces
-can often help determine where the stall is occurring, which will usually
-be in the function nearest the top of the stack that stays the same from
-trace to trace.
+The RCU, RCU-sched, and RCU-bh implementations have CPU stall
+warnings.  SRCU does not have its own CPU stall warnings, but its
+calls to synchronize_sched() will result in RCU-sched detecting
+RCU-sched-related CPU stalls.  Please note that RCU only detects
+CPU stalls when there is a grace period in progress.  No grace period,
+no CPU stall warnings.
+
+To diagnose the cause of the stall, inspect the stack traces.
+The offending function will usually be near the top of the stack.
+If you have a series of stall warnings from a single extended stall,
+comparing the stack traces can often help determine where the stall
+is occurring, which will usually be in the function nearest the top of
+that portion of the stack which remains the same from trace to trace.
+If you can reliably trigger the stall, ftrace can be quite helpful.
 
 RCU bugs can often be debugged with the help of CONFIG_RCU_TRACE.
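The first item in the list above is easy to reproduce deliberately when
exercising the detector; a knowingly buggy fragment such as the following
will trigger the warning (illustrative only, never to be merged):

	static void provoke_rcu_stall(void)	/* never do this */
	{
		rcu_read_lock();
		for (;;)
			cpu_relax();	/* reader never exits, so the
					 * grace period cannot complete */
		rcu_read_unlock();	/* unreachable */
	}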
diff -urpNa -X dontdiff linux-2.6.34-rc4/Documentation/RCU/trace.txt linux-2.6.34-rc4-bc293d62-rcu/Documentation/RCU/trace.txt
--- linux-2.6.34-rc4/Documentation/RCU/trace.txt	2010-02-24 10:52:17.000000000 -0800
+++ linux-2.6.34-rc4-bc293d62-rcu/Documentation/RCU/trace.txt	2010-04-19 16:27:46.000000000 -0700
@@ -256,23 +256,23 @@ o	Each element of the form "1/1 0:127 ^0
 The output of "cat rcu/rcu_pending" looks as follows:
 
 rcu_sched:
-  0 np=255892 qsp=53936 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741
-  1 np=261224 qsp=54638 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792
-  2 np=237496 qsp=49664 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629
-  3 np=236249 qsp=48766 cbr=0 cng=286 gpc=48049 gps=1218 nf=207 nn=137723
-  4 np=221310 qsp=46850 cbr=0 cng=26 gpc=43161 gps=4634 nf=3529 nn=123110
-  5 np=237332 qsp=48449 cbr=0 cng=54 gpc=47920 gps=3252 nf=201 nn=137456
-  6 np=219995 qsp=46718 cbr=0 cng=50 gpc=42098 gps=6093 nf=4202 nn=120834
-  7 np=249893 qsp=49390 cbr=0 cng=72 gpc=38400 gps=17102 nf=41 nn=144888
+  0 np=255892 qsp=53936 rpq=85 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741
+  1 np=261224 qsp=54638 rpq=33 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792
+  2 np=237496 qsp=49664 rpq=23 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629
+  3 np=236249 qsp=48766 rpq=98 cbr=0 cng=286 gpc=48049 gps=1218 nf=207 nn=137723
+  4 np=221310 qsp=46850 rpq=7 cbr=0 cng=26 gpc=43161 gps=4634 nf=3529 nn=123110
+  5 np=237332 qsp=48449 rpq=9 cbr=0 cng=54 gpc=47920 gps=3252 nf=201 nn=137456
+  6 np=219995 qsp=46718 rpq=12 cbr=0 cng=50 gpc=42098 gps=6093 nf=4202 nn=120834
+  7 np=249893 qsp=49390 rpq=42 cbr=0 cng=72 gpc=38400 gps=17102 nf=41 nn=144888
 rcu_bh:
-  0 np=146741 qsp=1419 cbr=0 cng=6 gpc=0 gps=0 nf=2 nn=145314
-  1 np=155792 qsp=12597 cbr=0 cng=0 gpc=4 gps=8 nf=3 nn=143180
-  2 np=136629 qsp=18680 cbr=0 cng=0 gpc=7 gps=6 nf=0 nn=117936
-  3 np=137723 qsp=2843 cbr=0 cng=0 gpc=10 gps=7 nf=0 nn=134863
-  4 np=123110 qsp=12433 cbr=0 cng=0 gpc=4 gps=2 nf=0 nn=110671
-  5 np=137456 qsp=4210 cbr=0 cng=0 gpc=6 gps=5 nf=0 nn=133235
-  6 np=120834 qsp=9902 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921
-  7 np=144888 qsp=26336 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542
+  0 np=146741 qsp=1419 rpq=6 cbr=0 cng=6 gpc=0 gps=0 nf=2 nn=145314
+  1 np=155792 qsp=12597 rpq=3 cbr=0 cng=0 gpc=4 gps=8 nf=3 nn=143180
+  2 np=136629 qsp=18680 rpq=1 cbr=0 cng=0 gpc=7 gps=6 nf=0 nn=117936
+  3 np=137723 qsp=2843 rpq=0 cbr=0 cng=0 gpc=10 gps=7 nf=0 nn=134863
+  4 np=123110 qsp=12433 rpq=0 cbr=0 cng=0 gpc=4 gps=2 nf=0 nn=110671
+  5 np=137456 qsp=4210 rpq=1 cbr=0 cng=0 gpc=6 gps=5 nf=0 nn=133235
+  6 np=120834 qsp=9902 rpq=2 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921
+  7 np=144888 qsp=26336 rpq=0 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542
 
 As always, this is once again split into "rcu_sched" and "rcu_bh"
 portions, with CONFIG_TREE_PREEMPT_RCU kernels having an additional
@@ -284,6 +284,9 @@ o	"np" is the number of times that __rcu
 o	"qsp" is the number of times that the RCU was waiting for a
 	quiescent state from this CPU.
 
+o	"rpq" is the number of times that the CPU had passed through
+	a quiescent state, but not yet reported it to RCU.
+
 o	"cbr" is the number of times that this CPU had RCU callbacks
 	that had passed through a grace period, and were thus ready
 	to be invoked.
diff -urpNa -X dontdiff linux-2.6.34-rc4/Documentation/RCU/whatisRCU.txt linux-2.6.34-rc4-bc293d62-rcu/Documentation/RCU/whatisRCU.txt
--- linux-2.6.34-rc4/Documentation/RCU/whatisRCU.txt	2010-04-19 14:44:58.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/Documentation/RCU/whatisRCU.txt	2010-04-19 15:01:10.000000000 -0700
@@ -840,6 +840,12 @@ SRCU:	Initialization/cleanup
 	init_srcu_struct
 	cleanup_srcu_struct
 
+All:  lockdep-checked RCU-protected pointer access
+
+	rcu_dereference_check
+	rcu_dereference_protected
+	rcu_access_pointer
+
 See the comment headers in the source code (or the docbook generated
 from them) for more information.
diff -urpNa -X dontdiff linux-2.6.34-rc4/drivers/net/cnic.c linux-2.6.34-rc4-bc293d62-rcu/drivers/net/cnic.c
--- linux-2.6.34-rc4/drivers/net/cnic.c	2010-04-19 14:45:26.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/drivers/net/cnic.c	2010-04-19 15:01:10.000000000 -0700
@@ -2334,13 +2334,13 @@ static int cnic_service_bnx2x(void *data
 	struct cnic_local *cp = dev->cnic_priv;
 	u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
 
-	prefetch(cp->status_blk.bnx2x);
-	prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
+	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
+		prefetch(cp->status_blk.bnx2x);
+		prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
 
-	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
 		tasklet_schedule(&cp->cnic_irq_task);
-
-	cnic_chk_pkt_rings(cp);
+		cnic_chk_pkt_rings(cp);
+	}
 
 	return 0;
 }
diff -urpNa -X dontdiff linux-2.6.34-rc4/drivers/net/e1000e/netdev.c linux-2.6.34-rc4-bc293d62-rcu/drivers/net/e1000e/netdev.c
--- linux-2.6.34-rc4/drivers/net/e1000e/netdev.c	2010-04-19 14:45:27.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/drivers/net/e1000e/netdev.c	2010-04-19 15:01:10.000000000 -0700
@@ -661,6 +661,8 @@ static bool e1000_clean_tx_irq(struct e1
 			i = 0;
 		}
 
+		if (i == tx_ring->next_to_use)
+			break;
 		eop = tx_ring->buffer_info[i].next_to_watch;
 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
 	}
diff -urpNa -X dontdiff linux-2.6.34-rc4/drivers/net/igb/igb_ethtool.c linux-2.6.34-rc4-bc293d62-rcu/drivers/net/igb/igb_ethtool.c
--- linux-2.6.34-rc4/drivers/net/igb/igb_ethtool.c	2010-04-19 14:45:27.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/drivers/net/igb/igb_ethtool.c	2010-04-19 15:01:10.000000000 -0700
@@ -1814,6 +1814,7 @@ static int igb_wol_exclusion(struct igb_
 		retval = 0;
 		break;
 	case E1000_DEV_ID_82576_QUAD_COPPER:
+	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
 		/* quad port adapters only support WoL on port A */
 		if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
 			wol->supported = 0;
diff -urpNa -X dontdiff linux-2.6.34-rc4/drivers/net/igb/igb_main.c linux-2.6.34-rc4-bc293d62-rcu/drivers/net/igb/igb_main.c
--- linux-2.6.34-rc4/drivers/net/igb/igb_main.c	2010-04-19 14:45:27.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/drivers/net/igb/igb_main.c	2010-04-19 15:01:10.000000000 -0700
@@ -1612,6 +1612,7 @@ static int __devinit igb_probe(struct pc
 		adapter->eeprom_wol = 0;
 		break;
 	case E1000_DEV_ID_82576_QUAD_COPPER:
+	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
 		/* if quad port adapter, disable WoL on all but port A */
 		if (global_quad_port_a != 0)
 			adapter->eeprom_wol = 0;
diff -urpNa -X dontdiff linux-2.6.34-rc4/drivers/net/myri10ge/myri10ge.c linux-2.6.34-rc4-bc293d62-rcu/drivers/net/myri10ge/myri10ge.c
--- linux-2.6.34-rc4/drivers/net/myri10ge/myri10ge.c	2010-04-19 14:45:28.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/drivers/net/myri10ge/myri10ge.c	2010-04-19 15:01:10.000000000 -0700
@@ -1690,7 +1690,7 @@ myri10ge_set_pauseparam(struct net_devic
 	if (pause->tx_pause != mgp->pause)
 		return myri10ge_change_pause(mgp, pause->tx_pause);
 	if (pause->rx_pause != mgp->pause)
-		return myri10ge_change_pause(mgp, pause->tx_pause);
+		return myri10ge_change_pause(mgp, pause->rx_pause);
 	if (pause->autoneg != 0)
 		return -EINVAL;
 	return 0;
diff -urpNa -X dontdiff linux-2.6.34-rc4/drivers/net/pcmcia/smc91c92_cs.c linux-2.6.34-rc4-bc293d62-rcu/drivers/net/pcmcia/smc91c92_cs.c
--- linux-2.6.34-rc4/drivers/net/pcmcia/smc91c92_cs.c	2010-04-19 14:45:29.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/drivers/net/pcmcia/smc91c92_cs.c	2010-04-19 15:01:10.000000000 -0700
@@ -1608,9 +1608,12 @@ static void set_rx_mode(struct net_devic
 {
 	unsigned int ioaddr = dev->base_addr;
 	struct smc_private *smc = netdev_priv(dev);
-	u_int multicast_table[ 2 ] = { 0, };
+	unsigned char multicast_table[8];
 	unsigned long flags;
 	u_short rx_cfg_setting;
+	int i;
+
+	memset(multicast_table, 0, sizeof(multicast_table));
 
 	if (dev->flags & IFF_PROMISC) {
 		rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti;
@@ -1622,10 +1625,6 @@ static void set_rx_mode(struct net_devic
 
 		netdev_for_each_mc_addr(mc_addr, dev) {
 			u_int position = ether_crc(6, mc_addr->dmi_addr);
-#ifndef final_version		/* Verify multicast address. */
-			if ((mc_addr->dmi_addr[0] & 1) == 0)
-				continue;
-#endif
 			multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
 		}
 	}
@@ -1635,8 +1634,8 @@ static void set_rx_mode(struct net_devic
 	/* Load MC table and Rx setting into the chip without interrupts. */
 	spin_lock_irqsave(&smc->lock, flags);
 	SMC_SELECT_BANK(3);
-	outl(multicast_table[0], ioaddr + MULTICAST0);
-	outl(multicast_table[1], ioaddr + MULTICAST4);
+	for (i = 0; i < 8; i++)
+		outb(multicast_table[i], ioaddr + MULTICAST0 + i);
 	SMC_SELECT_BANK(0);
 	outw(rx_cfg_setting, ioaddr + RCR);
 	SMC_SELECT_BANK(2);
diff -urpNa -X dontdiff linux-2.6.34-rc4/drivers/net/qlcnic/qlcnic_hw.c linux-2.6.34-rc4-bc293d62-rcu/drivers/net/qlcnic/qlcnic_hw.c
--- linux-2.6.34-rc4/drivers/net/qlcnic/qlcnic_hw.c	2010-04-19 14:45:29.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/drivers/net/qlcnic/qlcnic_hw.c	2010-04-19 15:01:10.000000000 -0700
@@ -431,6 +431,9 @@ void qlcnic_set_multi(struct net_device
 	u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 	u32 mode = VPORT_MISS_MODE_DROP;
 
+	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+		return;
+
 	qlcnic_nic_add_mac(adapter, adapter->mac_addr);
 	qlcnic_nic_add_mac(adapter, bcast_addr);
diff -urpNa -X dontdiff linux-2.6.34-rc4/drivers/net/r6040.c linux-2.6.34-rc4-bc293d62-rcu/drivers/net/r6040.c
--- linux-2.6.34-rc4/drivers/net/r6040.c	2010-04-19 14:45:29.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/drivers/net/r6040.c	2010-04-19 15:01:10.000000000 -0700
@@ -134,7 +134,7 @@
 #define RX_DESC_SIZE	(RX_DCNT * sizeof(struct r6040_descriptor))
 #define TX_DESC_SIZE	(TX_DCNT * sizeof(struct r6040_descriptor))
 #define MBCR_DEFAULT	0x012A	/* MAC Bus Control Register */
-#define MCAST_MAX	4	/* Max number multicast addresses to filter */
+#define MCAST_MAX	3	/* Max number multicast addresses to filter */
 
 /* Descriptor status */
 #define DSC_OWNER_MAC	0x8000	/* MAC is the owner of this descriptor */
@@ -982,9 +982,6 @@ static void r6040_multicast_list(struct
 			crc >>= 26;
 			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
 		}
-	/* Write the index of the hash table */
-	for (i = 0; i < 4; i++)
-		iowrite16(hash_table[i] << 14, ioaddr + MCR1);
 	/* Fill the MAC hash tables with their values */
 	iowrite16(hash_table[0], ioaddr + MAR0);
 	iowrite16(hash_table[1], ioaddr + MAR1);
@@ -1000,9 +997,9 @@ static void r6040_multicast_list(struct
 		iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
 		iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
 	} else {
-		iowrite16(0xffff, ioaddr + MID_0L + 8 * i);
-		iowrite16(0xffff, ioaddr + MID_0M + 8 * i);
-		iowrite16(0xffff, ioaddr + MID_0H + 8 * i);
+		iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
+		iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
+		iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
 	}
 	i++;
 }
diff -urpNa -X dontdiff linux-2.6.34-rc4/drivers/net/stmmac/stmmac_main.c linux-2.6.34-rc4-bc293d62-rcu/drivers/net/stmmac/stmmac_main.c
--- linux-2.6.34-rc4/drivers/net/stmmac/stmmac_main.c	2010-04-19 14:45:30.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/drivers/net/stmmac/stmmac_main.c	2010-04-19 15:01:10.000000000 -0700
@@ -1686,7 +1686,7 @@ static int stmmac_dvr_probe(struct platf
 	}
 	pr_info("done!\n");
 
-	if (!request_mem_region(res->start, (res->end - res->start),
+	if (!request_mem_region(res->start, resource_size(res),
 				pdev->name)) {
 		pr_err("%s: ERROR: memory allocation failed"
 		       "cannot get the I/O addr 0x%x\n",
@@ -1695,9 +1695,9 @@ static int stmmac_dvr_probe(struct platf
 		goto out;
 	}
 
-	addr = ioremap(res->start, (res->end - res->start));
+	addr = ioremap(res->start, resource_size(res));
 	if (!addr) {
-		pr_err("%s: ERROR: memory mapping failed \n", __func__);
+		pr_err("%s: ERROR: memory mapping failed\n", __func__);
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -1775,7 +1775,7 @@ static int stmmac_dvr_probe(struct platf
 out:
 	if (ret < 0) {
 		platform_set_drvdata(pdev, NULL);
-		release_mem_region(res->start, (res->end - res->start));
+		release_mem_region(res->start, resource_size(res));
 		if (addr != NULL)
 			iounmap(addr);
 	}
@@ -1813,7 +1813,7 @@ static int stmmac_dvr_remove(struct plat
 
 	iounmap((void *)ndev->base_addr);
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	release_mem_region(res->start, (res->end - res->start));
+	release_mem_region(res->start, resource_size(res));
 
 	free_netdev(ndev);
diff -urpNa -X dontdiff linux-2.6.34-rc4/drivers/net/virtio_net.c linux-2.6.34-rc4-bc293d62-rcu/drivers/net/virtio_net.c
--- linux-2.6.34-rc4/drivers/net/virtio_net.c	2010-04-19 14:45:31.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/drivers/net/virtio_net.c	2010-04-19 15:01:10.000000000 -0700
@@ -327,6 +327,7 @@ static int add_recvbuf_small(struct virt
 	struct scatterlist sg[2];
 	int err;
 
+	sg_init_table(sg, 2);
 	skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
 	if (unlikely(!skb))
 		return -ENOMEM;
@@ -352,6 +353,7 @@ static int add_recvbuf_big(struct virtne
 	char *p;
 	int i, err, offset;
 
+	sg_init_table(sg, MAX_SKB_FRAGS + 2);
 	/* page in sg[MAX_SKB_FRAGS + 1] is list tail */
 	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
 		first = get_a_page(vi, gfp);
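The virtio_net hunks above work because sg_init_table() both zeroes the
entries and marks the last one as the end of the chain; an on-stack
scatterlist is otherwise uninitialized garbage.  A minimal illustration of
the idiom (hdr/buf and the helper name are made-up, sg_init_table() and
sg_set_buf() are the real scatterlist API):

	static void fill_two_entry_sg(struct scatterlist *sg,
				      void *hdr, unsigned int hdr_len,
				      void *buf, unsigned int len)
	{
		sg_init_table(sg, 2);	/* zero entries, set end marker */
		sg_set_buf(&sg[0], hdr, hdr_len);
		sg_set_buf(&sg[1], buf, len);
	}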
diff -urpNa -X dontdiff linux-2.6.34-rc4/drivers/net/wireless/ath/ath9k/main.c linux-2.6.34-rc4-bc293d62-rcu/drivers/net/wireless/ath/ath9k/main.c
--- linux-2.6.34-rc4/drivers/net/wireless/ath/ath9k/main.c	2010-04-19 14:45:32.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/drivers/net/wireless/ath/ath9k/main.c	2010-04-19 15:01:10.000000000 -0700
@@ -1532,8 +1532,7 @@ static int ath9k_config(struct ieee80211
 		all_wiphys_idle =  ath9k_all_wiphys_idle(sc);
 		ath9k_set_wiphy_idle(aphy, idle);
 
-		if (!idle && all_wiphys_idle)
-			enable_radio = true;
+		enable_radio = (!idle && all_wiphys_idle);
 
 		/*
 		 * After we unlock here its possible another wiphy
diff -urpNa -X dontdiff linux-2.6.34-rc4/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.34-rc4-bc293d62-rcu/drivers/net/wireless/iwlwifi/iwl-4965.c
--- linux-2.6.34-rc4/drivers/net/wireless/iwlwifi/iwl-4965.c	2010-04-19 14:45:32.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/drivers/net/wireless/iwlwifi/iwl-4965.c	2010-04-19 15:01:10.000000000 -0700
@@ -2015,7 +2015,9 @@ static void iwl4965_rx_reply_tx(struct i
 			IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
 					   "%d index %d\n", scd_ssn , index);
 			freed = iwl_tx_queue_reclaim(priv, txq_id, index);
-			iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+			if (qc)
+				iwl_free_tfds_in_queue(priv, sta_id,
+						       tid, freed);
 
 			if (priv->mac80211_registered &&
 			    (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
@@ -2041,14 +2043,17 @@ static void iwl4965_rx_reply_tx(struct i
 				   tx_resp->failure_frame);
 
 		freed = iwl_tx_queue_reclaim(priv, txq_id, index);
-		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+		if (qc && likely(sta_id != IWL_INVALID_STATION))
+			iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+		else if (sta_id == IWL_INVALID_STATION)
+			IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
 
 		if (priv->mac80211_registered &&
 		    (iwl_queue_space(&txq->q) > txq->q.low_mark))
 			iwl_wake_queue(priv, txq_id);
 	}
-
-	iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+	if (qc && likely(sta_id != IWL_INVALID_STATION))
+		iwl_txq_check_empty(priv, sta_id, tid, txq_id);
 
 	if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
 		IWL_ERR(priv, "TODO:  Implement Tx ABORT REQUIRED!!!\n");
diff -urpNa -X dontdiff linux-2.6.34-rc4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.34-rc4-bc293d62-rcu/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
--- linux-2.6.34-rc4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c	2010-04-19 14:45:32.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/drivers/net/wireless/iwlwifi/iwl-agn-rs.c	2010-04-19 15:01:10.000000000 -0700
@@ -346,6 +346,17 @@ static inline int get_num_of_ant_from_ra
 	       !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
 }
 
+/*
+ * Static function to get the expected throughput from an iwl_scale_tbl_info
+ * that wraps a NULL pointer check
+ */
+static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
+{
+	if (tbl->expected_tpt)
+		return tbl->expected_tpt[rs_index];
+	return 0;
+}
+
 /**
  * rs_collect_tx_data - Update the success/failure sliding window
  *
@@ -353,19 +364,21 @@ static inline int get_num_of_ant_from_ra
  * at this rate.  window->data contains the bitmask of successful
  * packets.
  */
-static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
-			      int scale_index, s32 tpt, int attempts,
-			      int successes)
+static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+			      int scale_index, int attempts, int successes)
 {
 	struct iwl_rate_scale_data *window = NULL;
 	static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
-	s32 fail_count;
+	s32 fail_count, tpt;
 
 	if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
 		return -EINVAL;
 
 	/* Select window for current tx bit rate */
-	window = &(windows[scale_index]);
+	window = &(tbl->win[scale_index]);
+
+	/* Get expected throughput */
+	tpt = get_expected_tpt(tbl, scale_index);
 
 	/*
 	 * Keep track of only the latest 62 tx frame attempts in this rate's
@@ -739,16 +752,6 @@ static bool table_type_matches(struct iw
 	return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
 	       (a->is_SGI == b->is_SGI);
 }
-/*
- * Static function to get the expected throughput from an iwl_scale_tbl_info
- * that wraps a NULL pointer check
- */
-static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
-{
-	if (tbl->expected_tpt)
-		return tbl->expected_tpt[rs_index];
-	return 0;
-}
 
 /*
  * mac80211 sends us Tx status
@@ -765,12 +768,10 @@ static void rs_tx_status(void *priv_r, s
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct iwl_priv *priv = (struct iwl_priv *)priv_r;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct iwl_rate_scale_data *window = NULL;
 	enum mac80211_rate_control_flags mac_flags;
 	u32 tx_rate;
 	struct iwl_scale_tbl_info tbl_type;
-	struct iwl_scale_tbl_info *curr_tbl, *other_tbl;
-	s32 tpt = 0;
+	struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
 
 	IWL_DEBUG_RATE_LIMIT(priv,
 			     "get frame ack response, update rate scale window\n");
@@ -853,7 +854,6 @@ static void rs_tx_status(void *priv_r, s
 		IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n");
 		return;
 	}
-	window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]);
 
 	/*
 	 * Updating the frame history depends on whether packets were
@@ -866,8 +866,7 @@ static void rs_tx_status(void *priv_r, s
 		tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
 		rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
 					 &rs_index);
-		tpt = get_expected_tpt(curr_tbl, rs_index);
-		rs_collect_tx_data(window, rs_index, tpt,
+		rs_collect_tx_data(curr_tbl, rs_index,
 				   info->status.ampdu_ack_len,
 				   info->status.ampdu_ack_map);
 
@@ -897,19 +896,13 @@ static void rs_tx_status(void *priv_r, s
 			 * table as active/search.
 			 */
 			if (table_type_matches(&tbl_type, curr_tbl))
-				tpt = get_expected_tpt(curr_tbl, rs_index);
+				tmp_tbl = curr_tbl;
 			else if (table_type_matches(&tbl_type, other_tbl))
-				tpt = get_expected_tpt(other_tbl, rs_index);
+				tmp_tbl = other_tbl;
 			else
 				continue;
-
-			/* Constants mean 1 transmission, 0 successes */
-			if (i < retries)
-				rs_collect_tx_data(window, rs_index, tpt, 1,
-						   0);
-			else
-				rs_collect_tx_data(window, rs_index, tpt, 1,
-						   legacy_success);
+			rs_collect_tx_data(tmp_tbl, rs_index, 1,
+					   i < retries ? 0 : legacy_success);
 		}
 
 		/* Update success/fail counts if not searching for new mode */
diff -urpNa -X dontdiff linux-2.6.34-rc4/drivers/net/wireless/iwlwifi/iwl-core.c linux-2.6.34-rc4-bc293d62-rcu/drivers/net/wireless/iwlwifi/iwl-core.c
--- linux-2.6.34-rc4/drivers/net/wireless/iwlwifi/iwl-core.c	2010-04-19 14:45:32.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/drivers/net/wireless/iwlwifi/iwl-core.c	2010-04-19 15:01:10.000000000 -0700
@@ -308,10 +308,13 @@ int iwl_hw_nic_init(struct iwl_priv *pri
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	/* Allocate and init all Tx and Command queues */
-	ret = iwl_txq_ctx_reset(priv);
-	if (ret)
-		return ret;
+	/* Allocate or reset and init all Tx and Command queues */
+	if (!priv->txq) {
+		ret = iwl_txq_ctx_alloc(priv);
+		if (ret)
+			return ret;
+	} else
+		iwl_txq_ctx_reset(priv);
 
 	set_bit(STATUS_INIT, &priv->status);
diff -urpNa -X dontdiff linux-2.6.34-rc4/drivers/net/wireless/iwlwifi/iwl-core.h linux-2.6.34-rc4-bc293d62-rcu/drivers/net/wireless/iwlwifi/iwl-core.h
--- linux-2.6.34-rc4/drivers/net/wireless/iwlwifi/iwl-core.h	2010-04-19 14:45:32.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/drivers/net/wireless/iwlwifi/iwl-core.h	2010-04-19 15:01:10.000000000 -0700
@@ -442,7 +442,8 @@ void iwl_rx_csa(struct iwl_priv *priv, s
 /*****************************************************
 * TX
 ******************************************************/
-int iwl_txq_ctx_reset(struct iwl_priv *priv);
+int iwl_txq_ctx_alloc(struct iwl_priv *priv);
+void iwl_txq_ctx_reset(struct iwl_priv *priv);
 void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
 int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
 				 struct iwl_tx_queue *txq,
@@ -456,6 +457,8 @@ void iwl_free_tfds_in_queue(struct iwl_p
 void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
 int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 		      int slots_num, u32 txq_id);
+void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+			int slots_num, u32 txq_id);
 void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
 int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
 int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
diff -urpNa -X dontdiff linux-2.6.34-rc4/drivers/net/wireless/iwlwifi/iwl-tx.c linux-2.6.34-rc4-bc293d62-rcu/drivers/net/wireless/iwlwifi/iwl-tx.c
--- linux-2.6.34-rc4/drivers/net/wireless/iwlwifi/iwl-tx.c	2010-04-19 14:45:32.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/drivers/net/wireless/iwlwifi/iwl-tx.c	2010-04-19 15:01:10.000000000 -0700
@@ -194,10 +194,34 @@ void iwl_cmd_queue_free(struct iwl_priv
 	struct iwl_queue *q = &txq->q;
 	struct device *dev = &priv->pci_dev->dev;
 	int i;
+	bool huge = false;
 
 	if (q->n_bd == 0)
 		return;
 
+	for (; q->read_ptr != q->write_ptr;
+	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+		/* we have no way to tell if it is a huge cmd ATM */
+		i = get_cmd_index(q, q->read_ptr, 0);
+
+		if (txq->meta[i].flags & CMD_SIZE_HUGE) {
+			huge = true;
+			continue;
+		}
+
+		pci_unmap_single(priv->pci_dev,
+				 pci_unmap_addr(&txq->meta[i], mapping),
+				 pci_unmap_len(&txq->meta[i], len),
+				 PCI_DMA_BIDIRECTIONAL);
+	}
+	if (huge) {
+		i = q->n_window;
+		pci_unmap_single(priv->pci_dev,
+				 pci_unmap_addr(&txq->meta[i], mapping),
+				 pci_unmap_len(&txq->meta[i], len),
+				 PCI_DMA_BIDIRECTIONAL);
+	}
+
 	/* De-alloc array of command/tx buffers */
 	for (i = 0; i <= TFD_CMD_SLOTS; i++)
 		kfree(txq->cmd[i]);
@@ -410,6 +434,26 @@ out_free_arrays:
 }
 EXPORT_SYMBOL(iwl_tx_queue_init);
 
+void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+			int slots_num, u32 txq_id)
+{
+	int actual_slots = slots_num;
+
+	if (txq_id == IWL_CMD_QUEUE_NUM)
+		actual_slots++;
+
+	memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
+
+	txq->need_update = 0;
+
+	/* Initialize queue's high/low-water marks, and head/tail indexes */
+	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+	/* Tell device where to find queue */
+	priv->cfg->ops->lib->txq_init(priv, txq);
+}
+EXPORT_SYMBOL(iwl_tx_queue_reset);
+
 /**
  * iwl_hw_txq_ctx_free - Free TXQ Context
  *
@@ -421,8 +465,7 @@ void iwl_hw_txq_ctx_free(struct iwl_priv
 
 	/* Tx queues */
 	if (priv->txq) {
-		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
-		     txq_id++)
+		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
 			if (txq_id == IWL_CMD_QUEUE_NUM)
 				iwl_cmd_queue_free(priv);
 			else
@@ -438,15 +481,15 @@ void iwl_hw_txq_ctx_free(struct iwl_priv
 EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
 
 /**
- * iwl_txq_ctx_reset - Reset TX queue context
- * Destroys all DMA structures and initialize them again
+ * iwl_txq_ctx_alloc - allocate TX queue context
+ * Allocate all Tx DMA structures and initialize them
  *
  * @param priv
  * @return error code
  */
-int iwl_txq_ctx_reset(struct iwl_priv *priv)
+int iwl_txq_ctx_alloc(struct iwl_priv *priv)
 {
-	int ret = 0;
+	int ret;
 	int txq_id, slots_num;
 	unsigned long flags;
 
@@ -504,8 +547,31 @@ int iwl_txq_ctx_reset(struct iwl_priv *p
 	return ret;
 }
 
+void iwl_txq_ctx_reset(struct iwl_priv *priv)
+{
+	int txq_id, slots_num;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	/* Turn off all Tx DMA fifos */
+	priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+	/* Tell NIC where to find the "keep warm" buffer */
+	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* Alloc and init all Tx queues, including the command queue (#4) */
+	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+		slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
+			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+		iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
+	}
+}
+
 /**
- * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
+ * iwl_txq_ctx_stop - Stop all Tx DMA channels
  */
 void iwl_txq_ctx_stop(struct iwl_priv *priv)
 {
@@ -525,9 +591,6 @@ void iwl_txq_ctx_stop(struct iwl_priv *p
 				    1000);
 	}
 	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/* Deallocate memory for all Tx queues */
-	iwl_hw_txq_ctx_free(priv);
 }
 EXPORT_SYMBOL(iwl_txq_ctx_stop);
 
@@ -1050,6 +1113,14 @@ int iwl_enqueue_hcmd(struct iwl_priv *pr
 
 	spin_lock_irqsave(&priv->hcmd_lock, flags);
 
+	/* If this is a huge cmd, mark the huge flag also on the meta.flags
+	 * of the _original_ cmd. This is used for DMA mapping clean up.
+	 */
+	if (cmd->flags & CMD_SIZE_HUGE) {
+		idx = get_cmd_index(q, q->write_ptr, 0);
+		txq->meta[idx].flags = CMD_SIZE_HUGE;
+	}
+
 	idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
 	out_cmd = txq->cmd[idx];
 	out_meta = &txq->meta[idx];
@@ -1227,6 +1298,7 @@ void iwl_tx_cmd_complete(struct iwl_priv
 	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
 	struct iwl_device_cmd *cmd;
 	struct iwl_cmd_meta *meta;
+	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
 
 	/* If a Tx command is being handled and it isn't in the actual
 	 * command queue then there a command routing bug has been introduced
@@ -1240,9 +1312,17 @@ void iwl_tx_cmd_complete(struct iwl_priv
 		return;
 	}
 
-	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
-	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
-	meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];
+	/* If this is a huge cmd, clear the huge flag on the meta.flags
+	 * of the _original_ cmd. So that iwl_cmd_queue_free won't unmap
+	 * the DMA buffer for the scan (huge) command.
+ */ + if (huge) { + cmd_index = get_cmd_index(&txq->q, index, 0); + txq->meta[cmd_index].flags = 0; + } + cmd_index = get_cmd_index(&txq->q, index, huge); + cmd = txq->cmd[cmd_index]; + meta = &txq->meta[cmd_index]; pci_unmap_single(priv->pci_dev, pci_unmap_addr(meta, mapping), @@ -1264,6 +1344,7 @@ void iwl_tx_cmd_complete(struct iwl_priv get_cmd_string(cmd->hdr.cmd)); wake_up_interruptible(&priv->wait_command_queue); } + meta->flags = 0; } EXPORT_SYMBOL(iwl_tx_cmd_complete); diff -urpNa -X dontdiff linux-2.6.34-rc4/drivers/staging/batman-adv/hard-interface.c linux-2.6.34-rc4-bc293d62-rcu/drivers/staging/batman-adv/hard-interface.c --- linux-2.6.34-rc4/drivers/staging/batman-adv/hard-interface.c 2010-04-19 14:45:41.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/drivers/staging/batman-adv/hard-interface.c 2010-04-19 16:27:46.000000000 -0700 @@ -301,7 +301,6 @@ int hardif_add_interface(char *dev, int batman_if->if_num = if_num; batman_if->dev = dev; batman_if->if_active = IF_INACTIVE; - INIT_RCU_HEAD(&batman_if->rcu); printk(KERN_INFO "batman-adv:Adding interface: %s\n", dev); avail_ifs++; diff -urpNa -X dontdiff linux-2.6.34-rc4/drivers/vhost/vhost.c linux-2.6.34-rc4-bc293d62-rcu/drivers/vhost/vhost.c --- linux-2.6.34-rc4/drivers/vhost/vhost.c 2010-04-19 14:45:47.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/drivers/vhost/vhost.c 2010-04-19 15:01:10.000000000 -0700 @@ -236,6 +236,10 @@ static int vq_memory_access_ok(void __us int log_all) { int i; + + if (!mem) + return 0; + for (i = 0; i < mem->nregions; ++i) { struct vhost_memory_region *m = mem->regions + i; unsigned long a = m->userspace_addr; diff -urpNa -X dontdiff linux-2.6.34-rc4/fs/file.c linux-2.6.34-rc4-bc293d62-rcu/fs/file.c --- linux-2.6.34-rc4/fs/file.c 2010-04-19 14:45:51.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/fs/file.c 2010-04-19 16:27:46.000000000 -0700 @@ -178,7 +178,6 @@ static struct fdtable * alloc_fdtable(un fdt->open_fds = (fd_set *)data; data += nr / BITS_PER_BYTE; fdt->close_on_exec = (fd_set *)data; - INIT_RCU_HEAD(&fdt->rcu); fdt->next = NULL; return fdt; @@ -312,7 +311,6 @@ struct files_struct *dup_fd(struct files new_fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init; new_fdt->open_fds = (fd_set *)&newf->open_fds_init; new_fdt->fd = &newf->fd_array[0]; - INIT_RCU_HEAD(&new_fdt->rcu); new_fdt->next = NULL; spin_lock(&oldf->file_lock); @@ -430,7 +428,6 @@ struct files_struct init_files = { .fd = &init_files.fd_array[0], .close_on_exec = (fd_set *)&init_files.close_on_exec_init, .open_fds = (fd_set *)&init_files.open_fds_init, - .rcu = RCU_HEAD_INIT, }, .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock), }; diff -urpNa -X dontdiff linux-2.6.34-rc4/fs/fs-writeback.c linux-2.6.34-rc4-bc293d62-rcu/fs/fs-writeback.c --- linux-2.6.34-rc4/fs/fs-writeback.c 2010-04-19 14:45:51.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/fs/fs-writeback.c 2010-04-19 16:27:46.000000000 -0700 @@ -75,12 +75,33 @@ static inline bool bdi_work_on_stack(str return test_bit(WS_ONSTACK_B, &work->state); } -static inline void bdi_work_init(struct bdi_work *work, - struct wb_writeback_args *args) +static inline void __bdi_work_init(struct bdi_work *work, + struct wb_writeback_args *args, + int on_stack) { - INIT_RCU_HEAD(&work->rcu_head); work->args = *args; work->state = WS_USED; + if (on_stack) { + work->state |= WS_ONSTACK; + init_rcu_head_on_stack(&work->rcu_head); + } +} + +static inline void bdi_work_init(struct bdi_work *work, + struct wb_writeback_args *args) +{ + __bdi_work_init(work, args, false); +} + 
+static inline void bdi_work_init_on_stack(struct bdi_work *work, + struct wb_writeback_args *args) +{ + __bdi_work_init(work, args, true); +} + +static inline void bdi_destroy_work_on_stack(struct bdi_work *work) +{ + destroy_rcu_head_on_stack(&work->rcu_head); } /** @@ -233,11 +254,11 @@ static void bdi_sync_writeback(struct ba }; struct bdi_work work; - bdi_work_init(&work, &args); - work.state |= WS_ONSTACK; + bdi_work_init_on_stack(&work, &args); bdi_queue_work(bdi, &work); bdi_wait_on_work_clear(&work); + bdi_destroy_work_on_stack(&work); } /** diff -urpNa -X dontdiff linux-2.6.34-rc4/fs/nfs/client.c linux-2.6.34-rc4-bc293d62-rcu/fs/nfs/client.c --- linux-2.6.34-rc4/fs/nfs/client.c 2010-04-19 14:45:52.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/fs/nfs/client.c 2010-04-19 15:01:10.000000000 -0700 @@ -1294,7 +1294,8 @@ static int nfs4_init_server(struct nfs_s /* Initialise the client representation from the mount data */ server->flags = data->flags; - server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR; + server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR| + NFS_CAP_POSIX_LOCK; server->options = data->options; /* Get a client record */ diff -urpNa -X dontdiff linux-2.6.34-rc4/fs/nfs/dir.c linux-2.6.34-rc4-bc293d62-rcu/fs/nfs/dir.c --- linux-2.6.34-rc4/fs/nfs/dir.c 2010-04-19 14:45:52.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/fs/nfs/dir.c 2010-04-19 15:01:10.000000000 -0700 @@ -1025,12 +1025,12 @@ static struct dentry *nfs_atomic_lookup( res = NULL; goto out; /* This turned out not to be a regular file */ + case -EISDIR: case -ENOTDIR: goto no_open; case -ELOOP: if (!(nd->intent.open.flags & O_NOFOLLOW)) goto no_open; - /* case -EISDIR: */ /* case -EINVAL: */ default: goto out; diff -urpNa -X dontdiff linux-2.6.34-rc4/fs/nfs/inode.c linux-2.6.34-rc4-bc293d62-rcu/fs/nfs/inode.c --- linux-2.6.34-rc4/fs/nfs/inode.c 2010-04-19 14:45:52.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/fs/nfs/inode.c 2010-04-19 15:01:10.000000000 -0700 @@ -623,10 +623,10 @@ struct nfs_open_context *nfs_find_open_c list_for_each_entry(pos, &nfsi->open_files, list) { if (cred != NULL && pos->cred != cred) continue; - if ((pos->mode & mode) == mode) { - ctx = get_nfs_open_context(pos); - break; - } + if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode) + continue; + ctx = get_nfs_open_context(pos); + break; } spin_unlock(&inode->i_lock); return ctx; diff -urpNa -X dontdiff linux-2.6.34-rc4/fs/nfs/nfs4proc.c linux-2.6.34-rc4-bc293d62-rcu/fs/nfs/nfs4proc.c --- linux-2.6.34-rc4/fs/nfs/nfs4proc.c 2010-04-19 14:45:52.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/fs/nfs/nfs4proc.c 2010-04-19 15:01:10.000000000 -0700 @@ -1523,6 +1523,8 @@ static int _nfs4_proc_open(struct nfs4_o nfs_post_op_update_inode(dir, o_res->dir_attr); } else nfs_refresh_inode(dir, o_res->dir_attr); + if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) + server->caps &= ~NFS_CAP_POSIX_LOCK; if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { status = _nfs4_proc_open_confirm(data); if (status != 0) @@ -1664,7 +1666,7 @@ static int _nfs4_do_open(struct inode *d status = PTR_ERR(state); if (IS_ERR(state)) goto err_opendata_put; - if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0) + if (server->caps & NFS_CAP_POSIX_LOCK) set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); nfs4_opendata_put(opendata); nfs4_put_state_owner(sp); diff -urpNa -X dontdiff linux-2.6.34-rc4/fs/nfs/write.c linux-2.6.34-rc4-bc293d62-rcu/fs/nfs/write.c --- linux-2.6.34-rc4/fs/nfs/write.c 2010-04-19 14:45:52.000000000 -0700 +++ 
linux-2.6.34-rc4-bc293d62-rcu/fs/nfs/write.c 2010-04-19 15:01:10.000000000 -0700 @@ -201,6 +201,7 @@ static int nfs_set_page_writeback(struct struct inode *inode = page->mapping->host; struct nfs_server *nfss = NFS_SERVER(inode); + page_cache_get(page); if (atomic_long_inc_return(&nfss->writeback) > NFS_CONGESTION_ON_THRESH) { set_bdi_congested(&nfss->backing_dev_info, @@ -216,6 +217,7 @@ static void nfs_end_page_writeback(struc struct nfs_server *nfss = NFS_SERVER(inode); end_page_writeback(page); + page_cache_release(page); if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); } @@ -421,6 +423,7 @@ static void nfs_mark_request_dirty(struct nfs_page *req) { __set_page_dirty_nobuffers(req->wb_page); + __mark_inode_dirty(req->wb_page->mapping->host, I_DIRTY_DATASYNC); } #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) @@ -660,9 +663,11 @@ static int nfs_writepage_setup(struct nf req = nfs_setup_write_request(ctx, page, offset, count); if (IS_ERR(req)) return PTR_ERR(req); + nfs_mark_request_dirty(req); /* Update file length */ nfs_grow_file(page, offset, count); nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); + nfs_mark_request_dirty(req); nfs_clear_page_tag_locked(req); return 0; } @@ -739,8 +744,6 @@ int nfs_updatepage(struct file *file, st status = nfs_writepage_setup(ctx, page, offset, count); if (status < 0) nfs_set_pageerror(page); - else - __set_page_dirty_nobuffers(page); dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n", status, (long long)i_size_read(inode)); @@ -749,13 +752,12 @@ int nfs_updatepage(struct file *file, st static void nfs_writepage_release(struct nfs_page *req) { + struct page *page = req->wb_page; - if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) { - nfs_end_page_writeback(req->wb_page); + if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) nfs_inode_remove_request(req); - } else - nfs_end_page_writeback(req->wb_page); nfs_clear_page_tag_locked(req); + nfs_end_page_writeback(page); } static int flush_task_priority(int how) @@ -779,7 +781,6 @@ static int nfs_write_rpcsetup(struct nfs int how) { struct inode *inode = req->wb_context->path.dentry->d_inode; - int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC; int priority = flush_task_priority(how); struct rpc_task *task; struct rpc_message msg = { @@ -794,9 +795,10 @@ static int nfs_write_rpcsetup(struct nfs .callback_ops = call_ops, .callback_data = data, .workqueue = nfsiod_workqueue, - .flags = flags, + .flags = RPC_TASK_ASYNC, .priority = priority, }; + int ret = 0; /* Set up the RPC argument and reply structs * NB: take care not to mess about with data->commit et al. 
*/ @@ -835,10 +837,18 @@ static int nfs_write_rpcsetup(struct nfs (unsigned long long)data->args.offset); task = rpc_run_task(&task_setup_data); - if (IS_ERR(task)) - return PTR_ERR(task); + if (IS_ERR(task)) { + ret = PTR_ERR(task); + goto out; + } + if (how & FLUSH_SYNC) { + ret = rpc_wait_for_completion_task(task); + if (ret == 0) + ret = task->tk_status; + } rpc_put_task(task); - return 0; +out: + return ret; } /* If a nfs_flush_* function fails, it should remove reqs from @head and @@ -847,9 +857,11 @@ static int nfs_write_rpcsetup(struct nfs */ static void nfs_redirty_request(struct nfs_page *req) { + struct page *page = req->wb_page; + nfs_mark_request_dirty(req); - nfs_end_page_writeback(req->wb_page); nfs_clear_page_tag_locked(req); + nfs_end_page_writeback(page); } /* @@ -1084,16 +1096,15 @@ static void nfs_writeback_release_full(v if (nfs_write_need_commit(data)) { memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); nfs_mark_request_commit(req); - nfs_end_page_writeback(page); dprintk(" marked for commit\n"); goto next; } dprintk(" OK\n"); remove_request: - nfs_end_page_writeback(page); nfs_inode_remove_request(req); next: nfs_clear_page_tag_locked(req); + nfs_end_page_writeback(page); } nfs_writedata_release(calldata); } @@ -1207,7 +1218,6 @@ static int nfs_commit_rpcsetup(struct li { struct nfs_page *first = nfs_list_entry(head->next); struct inode *inode = first->wb_context->path.dentry->d_inode; - int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC; int priority = flush_task_priority(how); struct rpc_task *task; struct rpc_message msg = { @@ -1222,7 +1232,7 @@ static int nfs_commit_rpcsetup(struct li .callback_ops = &nfs_commit_ops, .callback_data = data, .workqueue = nfsiod_workqueue, - .flags = flags, + .flags = RPC_TASK_ASYNC, .priority = priority, }; @@ -1252,6 +1262,8 @@ static int nfs_commit_rpcsetup(struct li task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); + if (how & FLUSH_SYNC) + rpc_wait_for_completion_task(task); rpc_put_task(task); return 0; } diff -urpNa -X dontdiff linux-2.6.34-rc4/fs/partitions/check.c linux-2.6.34-rc4-bc293d62-rcu/fs/partitions/check.c --- linux-2.6.34-rc4/fs/partitions/check.c 2010-04-19 14:45:53.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/fs/partitions/check.c 2010-04-19 16:27:46.000000000 -0700 @@ -456,7 +456,6 @@ struct hd_struct *add_partition(struct g } /* everything is up and running, commence */ - INIT_RCU_HEAD(&p->rcu_head); rcu_assign_pointer(ptbl->part[partno], p); /* suppress uevent if the disk supresses it */ diff -urpNa -X dontdiff linux-2.6.34-rc4/include/linux/debugobjects.h linux-2.6.34-rc4-bc293d62-rcu/include/linux/debugobjects.h --- linux-2.6.34-rc4/include/linux/debugobjects.h 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc4-bc293d62-rcu/include/linux/debugobjects.h 2010-04-19 16:27:46.000000000 -0700 @@ -20,12 +20,14 @@ struct debug_obj_descr; * struct debug_obj - representaion of an tracked object * @node: hlist node to link the object into the tracker list * @state: tracked object state + * @astate: current active state * @object: pointer to the real object * @descr: pointer to an object type specific debug description structure */ struct debug_obj { struct hlist_node node; enum debug_obj_state state; + unsigned int astate; void *object; struct debug_obj_descr *descr; }; @@ -60,6 +62,15 @@ extern void debug_object_deactivate(void extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr); extern void debug_object_free (void *addr, struct 
debug_obj_descr *descr); +/* + * Active state: + * - Set at 0 upon initialization. + * - Must return to 0 before deactivation. + */ +extern void +debug_object_active_state(void *addr, struct debug_obj_descr *descr, + unsigned int expect, unsigned int next); + extern void debug_objects_early_init(void); extern void debug_objects_mem_init(void); #else diff -urpNa -X dontdiff linux-2.6.34-rc4/include/linux/init_task.h linux-2.6.34-rc4-bc293d62-rcu/include/linux/init_task.h --- linux-2.6.34-rc4/include/linux/init_task.h 2010-04-19 14:45:55.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/include/linux/init_task.h 2010-04-19 16:27:46.000000000 -0700 @@ -49,7 +49,6 @@ extern struct group_info init_groups; { .first = &init_task.pids[PIDTYPE_PGID].node }, \ { .first = &init_task.pids[PIDTYPE_SID].node }, \ }, \ - .rcu = RCU_HEAD_INIT, \ .level = 0, \ .numbers = { { \ .nr = 0, \ diff -urpNa -X dontdiff linux-2.6.34-rc4/include/linux/nfs_fs_sb.h linux-2.6.34-rc4-bc293d62-rcu/include/linux/nfs_fs_sb.h --- linux-2.6.34-rc4/include/linux/nfs_fs_sb.h 2010-04-19 14:45:56.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/include/linux/nfs_fs_sb.h 2010-04-19 15:01:10.000000000 -0700 @@ -176,6 +176,7 @@ struct nfs_server { #define NFS_CAP_ATIME (1U << 11) #define NFS_CAP_CTIME (1U << 12) #define NFS_CAP_MTIME (1U << 13) +#define NFS_CAP_POSIX_LOCK (1U << 14) /* maximum number of slots to use */ diff -urpNa -X dontdiff linux-2.6.34-rc4/include/linux/rcupdate.h linux-2.6.34-rc4-bc293d62-rcu/include/linux/rcupdate.h --- linux-2.6.34-rc4/include/linux/rcupdate.h 2010-04-19 14:45:56.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/include/linux/rcupdate.h 2010-04-19 16:27:46.000000000 -0700 @@ -40,6 +40,7 @@ #include <linux/seqlock.h> #include <linux/lockdep.h> #include <linux/completion.h> +#include <linux/debugobjects.h> #ifdef CONFIG_RCU_TORTURE_TEST extern int rcutorture_runnable; /* for sysctl */ @@ -56,8 +57,6 @@ struct rcu_head { }; /* Exported common interfaces */ -extern void synchronize_rcu_bh(void); -extern void synchronize_sched(void); extern void rcu_barrier(void); extern void rcu_barrier_bh(void); extern void rcu_barrier_sched(void); @@ -66,8 +65,6 @@ extern int sched_expedited_torture_stats /* Internal to kernel */ extern void rcu_init(void); -extern int rcu_scheduler_active; -extern void rcu_scheduler_starting(void); #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) #include <linux/rcutree.h> @@ -77,11 +74,24 @@ extern void rcu_scheduler_starting(void) #error "Unknown RCU implementation specified to kernel configuration" #endif -#define RCU_HEAD_INIT { .next = NULL, .func = NULL } -#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT -#define INIT_RCU_HEAD(ptr) do { \ - (ptr)->next = NULL; (ptr)->func = NULL; \ -} while (0) +/* + * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic + * initialization and destruction of rcu_head on the stack. rcu_head structures + * allocated dynamically in the heap or defined statically don't need any + * initialization. 
+ */ +#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD +extern void init_rcu_head_on_stack(struct rcu_head *head); +extern void destroy_rcu_head_on_stack(struct rcu_head *head); +#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ +static inline void init_rcu_head_on_stack(struct rcu_head *head) +{ +} + +static inline void destroy_rcu_head_on_stack(struct rcu_head *head) +{ +} +#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -101,20 +111,18 @@ extern struct lockdep_map rcu_sched_lock # define rcu_read_release_sched() \ lock_release(&rcu_sched_lock_map, 1, _THIS_IP_) -static inline int debug_lockdep_rcu_enabled(void) -{ - return likely(rcu_scheduler_active && debug_locks); -} +extern int debug_lockdep_rcu_enabled(void); /** * rcu_read_lock_held - might we be in RCU read-side critical section? * - * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in - * an RCU read-side critical section. In absence of CONFIG_PROVE_LOCKING, + * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU + * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, * this assumes we are in an RCU read-side critical section unless it can * prove otherwise. * - * Check rcu_scheduler_active to prevent false positives during boot. + * Check debug_lockdep_rcu_enabled() to prevent false positives during boot + * and while lockdep is disabled. */ static inline int rcu_read_lock_held(void) { @@ -132,13 +140,15 @@ extern int rcu_read_lock_bh_held(void); /** * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section? * - * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an - * RCU-sched read-side critical section. In absence of CONFIG_PROVE_LOCKING, - * this assumes we are in an RCU-sched read-side critical section unless it - * can prove otherwise. Note that disabling of preemption (including - * disabling irqs) counts as an RCU-sched read-side critical section. + * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an + * RCU-sched read-side critical section. In absence of + * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side + * critical section unless it can prove otherwise. Note that disabling + * of preemption (including disabling irqs) counts as an RCU-sched + * read-side critical section. * - * Check rcu_scheduler_active to prevent false positives during boot. + * Check debug_lockdep_rcu_enabled() to prevent false positives during boot + * and while lockdep is disabled. */ #ifdef CONFIG_PREEMPT static inline int rcu_read_lock_sched_held(void) @@ -180,7 +190,7 @@ static inline int rcu_read_lock_bh_held( #ifdef CONFIG_PREEMPT static inline int rcu_read_lock_sched_held(void) { - return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled(); + return preempt_count() != 0 || irqs_disabled(); } #else /* #ifdef CONFIG_PREEMPT */ static inline int rcu_read_lock_sched_held(void) @@ -195,12 +205,30 @@ static inline int rcu_read_lock_sched_he /** * rcu_dereference_check - rcu_dereference with debug checking + * @p: The pointer to read, prior to dereferencing + * @c: The conditions under which the dereference will take place * - * Do an rcu_dereference(), but check that the context is correct. - * For example, rcu_dereference_check(gp, rcu_read_lock_held()) to - * ensure that the rcu_dereference_check() executes within an RCU - * read-side critical section. It is also possible to check for - * locks being held, for example, by using lockdep_is_held(). 
+ * Do an rcu_dereference(), but check that the conditions under which the + * dereference will take place are correct. Typically the conditions indicate + * the various locking conditions that should be held at that point. The check + * should return true if the conditions are satisfied. + * + * For example: + * + * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() || + * lockdep_is_held(&foo->lock)); + * + * could be used to indicate to lockdep that foo->bar may only be dereferenced + * if either the RCU read lock is held, or that the lock required to replace + * the bar struct at foo->bar is held. + * + * Note that the list of conditions may also include indications of when a lock + * need not be held, for example during initialisation or destruction of the + * target struct: + * + * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() || + * lockdep_is_held(&foo->lock) || + * atomic_read(&foo->usage) == 0); */ #define rcu_dereference_check(p, c) \ ({ \ @@ -209,13 +237,45 @@ static inline int rcu_read_lock_sched_he rcu_dereference_raw(p); \ }) +/** + * rcu_dereference_protected - fetch RCU pointer when updates prevented + * + * Return the value of the specified RCU-protected pointer, but omit + * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This + * is useful in cases where update-side locks prevent the value of the + * pointer from changing. Please note that this primitive does -not- + * prevent the compiler from repeating this reference or combining it + * with other references, so it should not be used without protection + * of appropriate locks. + */ +#define rcu_dereference_protected(p, c) \ + ({ \ + if (debug_lockdep_rcu_enabled() && !(c)) \ + lockdep_rcu_dereference(__FILE__, __LINE__); \ + (p); \ + }) + #else /* #ifdef CONFIG_PROVE_RCU */ #define rcu_dereference_check(p, c) rcu_dereference_raw(p) +#define rcu_dereference_protected(p, c) (p) #endif /* #else #ifdef CONFIG_PROVE_RCU */ /** + * rcu_access_pointer - fetch RCU pointer with no dereferencing + * + * Return the value of the specified RCU-protected pointer, but omit the + * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful + * when the value of this pointer is accessed, but the pointer is not + * dereferenced, for example, when testing an RCU-protected pointer against + * NULL. This may also be used in cases where update-side locks prevent + * the value of the pointer from changing, but rcu_dereference_protected() + * is a lighter-weight primitive for this use case. + */ +#define rcu_access_pointer(p) ACCESS_ONCE(p) + +/** * rcu_read_lock - mark the beginning of an RCU read-side critical section. * * When synchronize_rcu() is invoked on one CPU while other CPUs @@ -454,4 +514,41 @@ extern void call_rcu(struct rcu_head *he extern void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *head)); +/* + * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally + * by call_rcu() and rcu callback execution, and are therefore not part of the + * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors. 
+ */ + +#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD +# define STATE_RCU_HEAD_READY 0 +# define STATE_RCU_HEAD_QUEUED 1 + +extern struct debug_obj_descr rcuhead_debug_descr; + +static inline void debug_rcu_head_queue(struct rcu_head *head) +{ + debug_object_activate(head, &rcuhead_debug_descr); + debug_object_active_state(head, &rcuhead_debug_descr, + STATE_RCU_HEAD_READY, + STATE_RCU_HEAD_QUEUED); +} + +static inline void debug_rcu_head_unqueue(struct rcu_head *head) +{ + debug_object_active_state(head, &rcuhead_debug_descr, + STATE_RCU_HEAD_QUEUED, + STATE_RCU_HEAD_READY); + debug_object_deactivate(head, &rcuhead_debug_descr); +} +#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ +static inline void debug_rcu_head_queue(struct rcu_head *head) +{ +} + +static inline void debug_rcu_head_unqueue(struct rcu_head *head) +{ +} +#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ + #endif /* __LINUX_RCUPDATE_H */ diff -urpNa -X dontdiff linux-2.6.34-rc4/include/linux/rcutiny.h linux-2.6.34-rc4-bc293d62-rcu/include/linux/rcutiny.h --- linux-2.6.34-rc4/include/linux/rcutiny.h 2010-04-19 14:45:56.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/include/linux/rcutiny.h 2010-04-19 16:27:46.000000000 -0700 @@ -29,6 +29,10 @@ void rcu_sched_qs(int cpu); void rcu_bh_qs(int cpu); +static inline void rcu_note_context_switch(int cpu) +{ + rcu_sched_qs(cpu); +} #define __rcu_read_lock() preempt_disable() #define __rcu_read_unlock() preempt_enable() @@ -74,7 +78,17 @@ static inline void rcu_sched_force_quies { } -#define synchronize_rcu synchronize_sched +extern void synchronize_sched(void); + +static inline void synchronize_rcu(void) +{ + synchronize_sched(); +} + +static inline void synchronize_rcu_bh(void) +{ + synchronize_sched(); +} static inline void synchronize_rcu_expedited(void) { @@ -114,4 +128,17 @@ static inline int rcu_preempt_depth(void return 0; } +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +extern int rcu_scheduler_active __read_mostly; +extern void rcu_scheduler_starting(void); + +#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +static inline void rcu_scheduler_starting(void) +{ +} + +#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + #endif /* __LINUX_RCUTINY_H */ diff -urpNa -X dontdiff linux-2.6.34-rc4/include/linux/rcutree.h linux-2.6.34-rc4-bc293d62-rcu/include/linux/rcutree.h --- linux-2.6.34-rc4/include/linux/rcutree.h 2010-04-19 14:45:56.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/include/linux/rcutree.h 2010-04-19 16:27:46.000000000 -0700 @@ -34,6 +34,7 @@ struct notifier_block; extern void rcu_sched_qs(int cpu); extern void rcu_bh_qs(int cpu); +extern void rcu_note_context_switch(int cpu); extern int rcu_needs_cpu(int cpu); extern int rcu_expedited_torture_stats(char *page); @@ -86,6 +87,8 @@ static inline void __rcu_read_unlock_bh( extern void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); +extern void synchronize_rcu_bh(void); +extern void synchronize_sched(void); extern void synchronize_rcu_expedited(void); static inline void synchronize_rcu_bh_expedited(void) @@ -120,4 +123,7 @@ static inline int rcu_blocking_is_gp(voi return num_online_cpus() == 1; } +extern void rcu_scheduler_starting(void); +extern int rcu_scheduler_active __read_mostly; + #endif /* __LINUX_RCUTREE_H */ diff -urpNa -X dontdiff linux-2.6.34-rc4/include/linux/srcu.h linux-2.6.34-rc4-bc293d62-rcu/include/linux/srcu.h --- linux-2.6.34-rc4/include/linux/srcu.h 2010-04-19 14:45:56.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/include/linux/srcu.h 2010-04-19 16:27:46.000000000 -0700 @@ -84,8 +84,8 @@ 
long srcu_batches_completed(struct srcu_ /** * srcu_read_lock_held - might we be in SRCU read-side critical section? * - * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in - * an SRCU read-side critical section. In absence of CONFIG_PROVE_LOCKING, + * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU + * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, * this assumes we are in an SRCU read-side critical section unless it can * prove otherwise. */ diff -urpNa -X dontdiff linux-2.6.34-rc4/include/net/x25.h linux-2.6.34-rc4-bc293d62-rcu/include/net/x25.h --- linux-2.6.34-rc4/include/net/x25.h 2010-04-19 14:45:57.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/include/net/x25.h 2010-04-19 15:01:10.000000000 -0700 @@ -183,6 +183,10 @@ extern int sysctl_x25_clear_request_tim extern int sysctl_x25_ack_holdback_timeout; extern int sysctl_x25_forward; +extern int x25_parse_address_block(struct sk_buff *skb, + struct x25_address *called_addr, + struct x25_address *calling_addr); + extern int x25_addr_ntoa(unsigned char *, struct x25_address *, struct x25_address *); extern int x25_addr_aton(unsigned char *, struct x25_address *, diff -urpNa -X dontdiff linux-2.6.34-rc4/kernel/power/user.c linux-2.6.34-rc4-bc293d62-rcu/kernel/power/user.c --- linux-2.6.34-rc4/kernel/power/user.c 2010-04-19 14:45:58.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/kernel/power/user.c 2010-04-19 15:01:10.000000000 -0700 @@ -420,7 +420,7 @@ static long snapshot_ioctl(struct file * * User space encodes device types as two-byte values, * so we need to recode them */ - swdev = old_decode_dev(swap_area.dev); + swdev = new_decode_dev(swap_area.dev); if (swdev) { offset = swap_area.offset; data->swap = swap_type_of(swdev, offset, NULL); diff -urpNa -X dontdiff linux-2.6.34-rc4/kernel/rcupdate.c linux-2.6.34-rc4-bc293d62-rcu/kernel/rcupdate.c --- linux-2.6.34-rc4/kernel/rcupdate.c 2010-04-19 14:45:58.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/kernel/rcupdate.c 2010-04-19 16:27:46.000000000 -0700 @@ -44,7 +44,6 @@ #include <linux/cpu.h> #include <linux/mutex.h> #include <linux/module.h> -#include <linux/kernel_stat.h> #include <linux/hardirq.h> #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -64,11 +63,15 @@ struct lockdep_map rcu_sched_lock_map = EXPORT_SYMBOL_GPL(rcu_sched_lock_map); #endif -int rcu_scheduler_active __read_mostly; -EXPORT_SYMBOL_GPL(rcu_scheduler_active); - #ifdef CONFIG_DEBUG_LOCK_ALLOC +int debug_lockdep_rcu_enabled(void) +{ + return rcu_scheduler_active && debug_locks && + current->lockdep_recursion == 0; +} +EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); + /** * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section? * @@ -90,21 +93,6 @@ EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held) #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ /* - * This function is invoked towards the end of the scheduler's initialization - * process. Before this is called, the idle task might contain - * RCU read-side critical sections (during which time, this idle - * task is booting the system). After this function is called, the - * idle tasks are prohibited from containing RCU read-side critical - * sections. - */ -void rcu_scheduler_starting(void) -{ - WARN_ON(num_online_cpus() != 1); - WARN_ON(nr_context_switches() > 0); - rcu_scheduler_active = 1; -} - -/* * Awaken the corresponding synchronize_rcu() instance now that a * grace period has elapsed. 
*/ @@ -115,3 +103,152 @@ void wakeme_after_rcu(struct rcu_head * rcu = container_of(head, struct rcu_synchronize, head); complete(&rcu->completion); } + +#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD +static inline void debug_init_rcu_head(struct rcu_head *head) +{ + debug_object_init(head, &rcuhead_debug_descr); +} + +static inline void debug_rcu_head_free(struct rcu_head *head) +{ + debug_object_free(head, &rcuhead_debug_descr); +} + +/* + * fixup_init is called when: + * - an active object is initialized + */ +static int rcuhead_fixup_init(void *addr, enum debug_obj_state state) +{ + struct rcu_head *head = addr; + + switch (state) { + case ODEBUG_STATE_ACTIVE: + /* + * Ensure that queued callbacks are all executed. + * If we detect that we are nested in a RCU read-side critical + * section, we should simply fail, otherwise we would deadlock. + */ +#ifndef CONFIG_PREEMPT + WARN_ON(1); + return 0; +#else + if (rcu_preempt_depth() != 0 || preempt_count() != 0 || + irqs_disabled()) { + WARN_ON(1); + return 0; + } + rcu_barrier(); + rcu_barrier_sched(); + rcu_barrier_bh(); + debug_object_init(head, &rcuhead_debug_descr); + return 1; +#endif + default: + return 0; + } +} + +/* + * fixup_activate is called when: + * - an active object is activated + * - an unknown object is activated (might be a statically initialized object) + * Activation is performed internally by call_rcu(). + */ +static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state) +{ + struct rcu_head *head = addr; + + switch (state) { + + case ODEBUG_STATE_NOTAVAILABLE: + /* + * This is not really a fixup. We just make sure that it is + * tracked in the object tracker. + */ + debug_object_init(head, &rcuhead_debug_descr); + debug_object_activate(head, &rcuhead_debug_descr); + return 0; + + case ODEBUG_STATE_ACTIVE: + /* + * Ensure that queued callbacks are all executed. + * If we detect that we are nested in a RCU read-side critical + * section, we should simply fail, otherwise we would deadlock. + */ +#ifndef CONFIG_PREEMPT + WARN_ON(1); + return 0; +#else + if (rcu_preempt_depth() != 0 || preempt_count() != 0 || + irqs_disabled()) { + WARN_ON(1); + return 0; + } + rcu_barrier(); + rcu_barrier_sched(); + rcu_barrier_bh(); + debug_object_activate(head, &rcuhead_debug_descr); + return 1; +#endif + default: + return 0; + } +} + +/* + * fixup_free is called when: + * - an active object is freed + */ +static int rcuhead_fixup_free(void *addr, enum debug_obj_state state) +{ + struct rcu_head *head = addr; + + switch (state) { + case ODEBUG_STATE_ACTIVE: + /* + * Ensure that queued callbacks are all executed. + * If we detect that we are nested in a RCU read-side critical + * section, we should simply fail, otherwise we would deadlock. 
+ */ +#ifndef CONFIG_PREEMPT + WARN_ON(1); + return 0; +#else + if (rcu_preempt_depth() != 0 || preempt_count() != 0 || + irqs_disabled()) { + WARN_ON(1); + return 0; + } + rcu_barrier(); + rcu_barrier_sched(); + rcu_barrier_bh(); + debug_object_free(head, &rcuhead_debug_descr); + return 1; +#endif + default: + return 0; + } +} + +void init_rcu_head_on_stack(struct rcu_head *head) +{ + debug_object_init_on_stack(head, &rcuhead_debug_descr); +} +EXPORT_SYMBOL_GPL(init_rcu_head_on_stack); + +void destroy_rcu_head_on_stack(struct rcu_head *head) +{ + debug_object_free(head, &rcuhead_debug_descr); +} +EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack); + +struct debug_obj_descr rcuhead_debug_descr = { + .name = "rcu_head", + .fixup_init = rcuhead_fixup_init, + .fixup_activate = rcuhead_fixup_activate, + .fixup_free = rcuhead_fixup_free, +}; +EXPORT_SYMBOL_GPL(rcuhead_debug_descr); +#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ diff -urpNa -X dontdiff linux-2.6.34-rc4/kernel/rcutiny.c linux-2.6.34-rc4-bc293d62-rcu/kernel/rcutiny.c --- linux-2.6.34-rc4/kernel/rcutiny.c 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc4-bc293d62-rcu/kernel/rcutiny.c 2010-04-19 16:27:46.000000000 -0700 @@ -44,9 +44,9 @@ struct rcu_ctrlblk { }; /* Definition for rcupdate control block. */ -static struct rcu_ctrlblk rcu_ctrlblk = { - .donetail = &rcu_ctrlblk.rcucblist, - .curtail = &rcu_ctrlblk.rcucblist, +static struct rcu_ctrlblk rcu_sched_ctrlblk = { + .donetail = &rcu_sched_ctrlblk.rcucblist, + .curtail = &rcu_sched_ctrlblk.rcucblist, }; static struct rcu_ctrlblk rcu_bh_ctrlblk = { @@ -54,6 +54,11 @@ static struct rcu_ctrlblk rcu_bh_ctrlblk .curtail = &rcu_bh_ctrlblk.rcucblist, }; +#ifdef CONFIG_DEBUG_LOCK_ALLOC +int rcu_scheduler_active __read_mostly; +EXPORT_SYMBOL_GPL(rcu_scheduler_active); +#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + #ifdef CONFIG_NO_HZ static long rcu_dynticks_nesting = 1; @@ -108,7 +113,8 @@ static int rcu_qsctr_help(struct rcu_ctr */ void rcu_sched_qs(int cpu) { - if (rcu_qsctr_help(&rcu_ctrlblk) + rcu_qsctr_help(&rcu_bh_ctrlblk)) + if (rcu_qsctr_help(&rcu_sched_ctrlblk) + + rcu_qsctr_help(&rcu_bh_ctrlblk)) raise_softirq(RCU_SOFTIRQ); } @@ -163,6 +169,7 @@ static void __rcu_process_callbacks(stru while (list) { next = list->next; prefetch(next); + debug_rcu_head_unqueue(list); list->func(list); list = next; } @@ -173,7 +180,7 @@ static void __rcu_process_callbacks(stru */ static void rcu_process_callbacks(struct softirq_action *unused) { - __rcu_process_callbacks(&rcu_ctrlblk); + __rcu_process_callbacks(&rcu_sched_ctrlblk); __rcu_process_callbacks(&rcu_bh_ctrlblk); } @@ -187,7 +194,8 @@ static void rcu_process_callbacks(struct * * Cool, huh? (Due to Josh Triplett.) * - * But we want to make this a static inline later. + * But we want to make this a static inline later. The cond_resched() + * currently makes this problematic. */ void synchronize_sched(void) { @@ -195,12 +203,6 @@ void synchronize_sched(void) } EXPORT_SYMBOL_GPL(synchronize_sched); -void synchronize_rcu_bh(void) -{ - synchronize_sched(); -} -EXPORT_SYMBOL_GPL(synchronize_rcu_bh); - /* * Helper function for call_rcu() and call_rcu_bh(). 
*/ @@ -210,6 +212,7 @@ static void __call_rcu(struct rcu_head * { unsigned long flags; + debug_rcu_head_queue(head); head->func = func; head->next = NULL; @@ -226,7 +229,7 @@ static void __call_rcu(struct rcu_head * */ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) { - __call_rcu(head, func, &rcu_ctrlblk); + __call_rcu(head, func, &rcu_sched_ctrlblk); } EXPORT_SYMBOL_GPL(call_rcu); @@ -244,11 +247,13 @@ void rcu_barrier(void) { struct rcu_synchronize rcu; + init_rcu_head_on_stack(&rcu.head); init_completion(&rcu.completion); /* Will wake me after RCU finished. */ call_rcu(&rcu.head, wakeme_after_rcu); /* Wait for it. */ wait_for_completion(&rcu.completion); + destroy_rcu_head_on_stack(&rcu.head); } EXPORT_SYMBOL_GPL(rcu_barrier); @@ -256,11 +261,13 @@ void rcu_barrier_bh(void) { struct rcu_synchronize rcu; + init_rcu_head_on_stack(&rcu.head); init_completion(&rcu.completion); /* Will wake me after RCU finished. */ call_rcu_bh(&rcu.head, wakeme_after_rcu); /* Wait for it. */ wait_for_completion(&rcu.completion); + destroy_rcu_head_on_stack(&rcu.head); } EXPORT_SYMBOL_GPL(rcu_barrier_bh); @@ -268,11 +275,13 @@ void rcu_barrier_sched(void) { struct rcu_synchronize rcu; + init_rcu_head_on_stack(&rcu.head); init_completion(&rcu.completion); /* Will wake me after RCU finished. */ call_rcu_sched(&rcu.head, wakeme_after_rcu); /* Wait for it. */ wait_for_completion(&rcu.completion); + destroy_rcu_head_on_stack(&rcu.head); } EXPORT_SYMBOL_GPL(rcu_barrier_sched); @@ -280,3 +289,5 @@ void __init rcu_init(void) { open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); } + +#include "rcutiny_plugin.h" diff -urpNa -X dontdiff linux-2.6.34-rc4/kernel/rcutiny_plugin.h linux-2.6.34-rc4-bc293d62-rcu/kernel/rcutiny_plugin.h --- linux-2.6.34-rc4/kernel/rcutiny_plugin.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.6.34-rc4-bc293d62-rcu/kernel/rcutiny_plugin.h 2010-04-19 16:27:46.000000000 -0700 @@ -0,0 +1,39 @@ +/* + * Read-Copy Update mechanism for mutual exclusion (tree-based version) + * Internal non-public definitions that provide either classic + * or preemptable semantics. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright IBM Corporation, 2009 + * + * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + */ + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +#include <linux/kernel_stat.h> + +/* + * During boot, we forgive RCU lockdep issues. After this function is + * invoked, we start taking RCU lockdep issues seriously. 
+ */ +void rcu_scheduler_starting(void) +{ + WARN_ON(nr_context_switches() > 0); + rcu_scheduler_active = 1; +} + +#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ diff -urpNa -X dontdiff linux-2.6.34-rc4/kernel/rcutorture.c linux-2.6.34-rc4-bc293d62-rcu/kernel/rcutorture.c --- linux-2.6.34-rc4/kernel/rcutorture.c 2010-04-19 14:45:58.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/kernel/rcutorture.c 2010-04-19 16:27:46.000000000 -0700 @@ -464,9 +464,11 @@ static void rcu_bh_torture_synchronize(v { struct rcu_bh_torture_synchronize rcu; + init_rcu_head_on_stack(&rcu.head); init_completion(&rcu.completion); call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb); wait_for_completion(&rcu.completion); + destroy_rcu_head_on_stack(&rcu.head); } static struct rcu_torture_ops rcu_bh_ops = { diff -urpNa -X dontdiff linux-2.6.34-rc4/kernel/rcutree.c linux-2.6.34-rc4-bc293d62-rcu/kernel/rcutree.c --- linux-2.6.34-rc4/kernel/rcutree.c 2010-04-19 14:45:58.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/kernel/rcutree.c 2010-04-19 16:27:46.000000000 -0700 @@ -46,6 +46,7 @@ #include <linux/cpu.h> #include <linux/mutex.h> #include <linux/time.h> +#include <linux/kernel_stat.h> #include "rcutree.h" @@ -53,8 +54,8 @@ static struct lock_class_key rcu_node_class[NUM_RCU_LVLS]; -#define RCU_STATE_INITIALIZER(name) { \ - .level = { &name.node[0] }, \ +#define RCU_STATE_INITIALIZER(structname) { \ + .level = { &structname.node[0] }, \ .levelcnt = { \ NUM_RCU_LVL_0, /* root of hierarchy. */ \ NUM_RCU_LVL_1, \ @@ -65,13 +66,14 @@ static struct lock_class_key rcu_node_cl .signaled = RCU_GP_IDLE, \ .gpnum = -300, \ .completed = -300, \ - .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&name.onofflock), \ + .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname.onofflock), \ .orphan_cbs_list = NULL, \ - .orphan_cbs_tail = &name.orphan_cbs_list, \ + .orphan_cbs_tail = &structname.orphan_cbs_list, \ .orphan_qlen = 0, \ - .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&name.fqslock), \ + .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname.fqslock), \ .n_force_qs = 0, \ .n_force_qs_ngp = 0, \ + .name = #structname, \ } struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state); @@ -80,6 +82,9 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sche struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); +int rcu_scheduler_active __read_mostly; +EXPORT_SYMBOL_GPL(rcu_scheduler_active); + /* * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s * permit this function to be invoked without holding the root rcu_node * @@ -97,25 +102,32 @@ static int rcu_gp_in_progress(struct rcu */ void rcu_sched_qs(int cpu) { - struct rcu_data *rdp; + struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu); - rdp = &per_cpu(rcu_sched_data, cpu); rdp->passed_quiesc_completed = rdp->gpnum - 1; barrier(); rdp->passed_quiesc = 1; - rcu_preempt_note_context_switch(cpu); } void rcu_bh_qs(int cpu) { - struct rcu_data *rdp; + struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); - rdp = &per_cpu(rcu_bh_data, cpu); rdp->passed_quiesc_completed = rdp->gpnum - 1; barrier(); rdp->passed_quiesc = 1; } +/* + * Note a context switch. This is a quiescent state for RCU-sched, + * and requires special handling for preemptible RCU. 
+ */ +void rcu_note_context_switch(int cpu) +{ + rcu_sched_qs(cpu); + rcu_preempt_note_context_switch(cpu); +} + #ifdef CONFIG_NO_HZ DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { .dynticks_nesting = 1, @@ -438,6 +450,8 @@ static int rcu_implicit_dynticks_qs(stru #ifdef CONFIG_RCU_CPU_STALL_DETECTOR +int rcu_cpu_stall_panicking __read_mostly; + static void record_gp_stall_check_time(struct rcu_state *rsp) { rsp->gp_start = jiffies; @@ -470,7 +484,8 @@ static void print_other_cpu_stall(struct /* OK, time to rat on our buddy... */ - printk(KERN_ERR "INFO: RCU detected CPU stalls:"); + printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {", + rsp->name); rcu_for_each_leaf_node(rsp, rnp) { raw_spin_lock_irqsave(&rnp->lock, flags); rcu_print_task_stall(rnp); @@ -481,7 +496,7 @@ static void print_other_cpu_stall(struct if (rnp->qsmask & (1UL << cpu)) printk(" %d", rnp->grplo + cpu); } - printk(" (detected by %d, t=%ld jiffies)\n", + printk("} (detected by %d, t=%ld jiffies)\n", smp_processor_id(), (long)(jiffies - rsp->gp_start)); trigger_all_cpu_backtrace(); @@ -497,8 +512,8 @@ static void print_cpu_stall(struct rcu_s unsigned long flags; struct rcu_node *rnp = rcu_get_root(rsp); - printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n", - smp_processor_id(), jiffies - rsp->gp_start); + printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n", + rsp->name, smp_processor_id(), jiffies - rsp->gp_start); trigger_all_cpu_backtrace(); raw_spin_lock_irqsave(&rnp->lock, flags); @@ -515,6 +530,8 @@ static void check_cpu_stall(struct rcu_s long delta; struct rcu_node *rnp; + if (rcu_cpu_stall_panicking) + return; delta = jiffies - rsp->jiffies_stall; rnp = rdp->mynode; if ((rnp->qsmask & rdp->grpmask) && delta >= 0) { @@ -529,6 +546,21 @@ static void check_cpu_stall(struct rcu_s } } +static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) +{ + rcu_cpu_stall_panicking = 1; + return NOTIFY_DONE; +} + +static struct notifier_block rcu_panic_block = { + .notifier_call = rcu_panic, +}; + +static void __init check_cpu_stall_init(void) +{ + atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block); +} + #else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ static void record_gp_stall_check_time(struct rcu_state *rsp) @@ -539,6 +571,10 @@ static void check_cpu_stall(struct rcu_s { } +static void __init check_cpu_stall_init(void) +{ +} + #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ /* @@ -1076,6 +1112,7 @@ static void rcu_do_batch(struct rcu_stat while (list) { next = list->next; prefetch(next); + debug_rcu_head_unqueue(list); list->func(list); list = next; if (++count >= rdp->blimit) @@ -1125,8 +1162,6 @@ static void rcu_do_batch(struct rcu_stat */ void rcu_check_callbacks(int cpu, int user) { - if (!rcu_pending(cpu)) - return; /* if nothing for RCU to do. */ if (user || (idle_cpu(cpu) && rcu_scheduler_active && !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) { @@ -1158,7 +1193,8 @@ void rcu_check_callbacks(int cpu, int us rcu_bh_qs(cpu); } rcu_preempt_check_callbacks(cpu); - raise_softirq(RCU_SOFTIRQ); + if (rcu_pending(cpu)) + raise_softirq(RCU_SOFTIRQ); } #ifdef CONFIG_SMP @@ -1236,11 +1272,11 @@ static void force_quiescent_state(struct break; /* grace period idle or initializing, ignore. */ case RCU_SAVE_DYNTICK: - - raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK) break; /* So gcc recognizes the dead code. 
*/ + raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ + /* Record dyntick-idle state. */ force_qs_rnp(rsp, dyntick_save_progress_counter); raw_spin_lock(&rnp->lock); /* irqs already disabled */ @@ -1353,6 +1389,7 @@ __call_rcu(struct rcu_head *head, void ( unsigned long flags; struct rcu_data *rdp; + debug_rcu_head_queue(head); head->func = func; head->next = NULL; @@ -1449,11 +1486,13 @@ void synchronize_sched(void) if (rcu_blocking_is_gp()) return; + init_rcu_head_on_stack(&rcu.head); init_completion(&rcu.completion); /* Will wake me after RCU finished. */ call_rcu_sched(&rcu.head, wakeme_after_rcu); /* Wait for it. */ wait_for_completion(&rcu.completion); + destroy_rcu_head_on_stack(&rcu.head); } EXPORT_SYMBOL_GPL(synchronize_sched); @@ -1473,11 +1512,13 @@ void synchronize_rcu_bh(void) if (rcu_blocking_is_gp()) return; + init_rcu_head_on_stack(&rcu.head); init_completion(&rcu.completion); /* Will wake me after RCU finished. */ call_rcu_bh(&rcu.head, wakeme_after_rcu); /* Wait for it. */ wait_for_completion(&rcu.completion); + destroy_rcu_head_on_stack(&rcu.head); } EXPORT_SYMBOL_GPL(synchronize_rcu_bh); @@ -1498,8 +1539,20 @@ static int __rcu_pending(struct rcu_stat check_cpu_stall(rsp, rdp); /* Is the RCU core waiting for a quiescent state from this CPU? */ - if (rdp->qs_pending) { + if (rdp->qs_pending && !rdp->passed_quiesc) { + + /* + * If force_quiescent_state() coming soon and this CPU + * needs a quiescent state, and this is either RCU-sched + * or RCU-bh, force a local reschedule. + */ rdp->n_rp_qs_pending++; + if (!rdp->preemptable && + ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1, + jiffies)) + set_need_resched(); + } else if (rdp->qs_pending && rdp->passed_quiesc) { + rdp->n_rp_report_qs++; return 1; } @@ -1767,6 +1820,21 @@ static int __cpuinit rcu_cpu_notify(stru } /* + * This function is invoked towards the end of the scheduler's initialization + * process. Before this is called, the idle task might contain + * RCU read-side critical sections (during which time, this idle + * task is booting the system). After this function is called, the + * idle tasks are prohibited from containing RCU read-side critical + * sections. This function also enables RCU lockdep checking. + */ +void rcu_scheduler_starting(void) +{ + WARN_ON(num_online_cpus() != 1); + WARN_ON(nr_context_switches() > 0); + rcu_scheduler_active = 1; +} + +/* * Compute the per-level fanout, either using the exact fanout specified * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT. 
*/ @@ -1849,6 +1917,14 @@ static void __init rcu_init_one(struct r INIT_LIST_HEAD(&rnp->blocked_tasks[3]); } } + + rnp = rsp->level[NUM_RCU_LVLS - 1]; + for_each_possible_cpu(i) { + while (i > rnp->grphi) + rnp++; + rsp->rda[i]->mynode = rnp; + rcu_boot_init_percpu_data(i, rsp); + } } /* @@ -1859,19 +1935,11 @@ static void __init rcu_init_one(struct r #define RCU_INIT_FLAVOR(rsp, rcu_data) \ do { \ int i; \ - int j; \ - struct rcu_node *rnp; \ \ - rcu_init_one(rsp); \ - rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \ - j = 0; \ for_each_possible_cpu(i) { \ - if (i > rnp[j].grphi) \ - j++; \ - per_cpu(rcu_data, i).mynode = &rnp[j]; \ (rsp)->rda[i] = &per_cpu(rcu_data, i); \ - rcu_boot_init_percpu_data(i, rsp); \ } \ + rcu_init_one(rsp); \ } while (0) void __init rcu_init(void) @@ -1879,12 +1947,6 @@ void __init rcu_init(void) int cpu; rcu_bootup_announce(); -#ifdef CONFIG_RCU_CPU_STALL_DETECTOR - printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); -#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ -#if NUM_RCU_LVL_4 != 0 - printk(KERN_INFO "Experimental four-level hierarchy is enabled.\n"); -#endif /* #if NUM_RCU_LVL_4 != 0 */ RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); __rcu_init_preempt(); @@ -1898,6 +1960,7 @@ void __init rcu_init(void) cpu_notifier(rcu_cpu_notify, 0); for_each_online_cpu(cpu) rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); + check_cpu_stall_init(); } #include "rcutree_plugin.h" diff -urpNa -X dontdiff linux-2.6.34-rc4/kernel/rcutree.h linux-2.6.34-rc4-bc293d62-rcu/kernel/rcutree.h --- linux-2.6.34-rc4/kernel/rcutree.h 2010-04-19 14:45:58.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/kernel/rcutree.h 2010-04-19 16:27:46.000000000 -0700 @@ -223,6 +223,7 @@ struct rcu_data { /* 5) __rcu_pending() statistics. */ unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */ unsigned long n_rp_qs_pending; + unsigned long n_rp_report_qs; unsigned long n_rp_cb_ready; unsigned long n_rp_cpu_needs_gp; unsigned long n_rp_gp_completed; @@ -326,6 +327,7 @@ struct rcu_state { unsigned long jiffies_stall; /* Time at which to check */ /* for CPU stalls. */ #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ + char *name; /* Name of structure. */ }; /* Return values for rcu_preempt_offline_tasks(). */ diff -urpNa -X dontdiff linux-2.6.34-rc4/kernel/rcutree_plugin.h linux-2.6.34-rc4-bc293d62-rcu/kernel/rcutree_plugin.h --- linux-2.6.34-rc4/kernel/rcutree_plugin.h 2010-04-19 14:45:58.000000000 -0700 +++ linux-2.6.34-rc4-bc293d62-rcu/kernel/rcutree_plugin.h 2010-04-19 16:27:46.000000000 -0700 @@ -26,6 +26,45 @@ #include +/* + * Check the RCU kernel configuration parameters and print informative + * messages about anything out of the ordinary. If you like #ifdef, you + * will love this function. 
+ */ +static void __init rcu_bootup_announce_oddness(void) +{ +#ifdef CONFIG_RCU_TRACE + printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n"); +#endif +#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32) + printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n", + CONFIG_RCU_FANOUT); +#endif +#ifdef CONFIG_RCU_FANOUT_EXACT + printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n"); +#endif +#ifdef CONFIG_RCU_FAST_NO_HZ + printk(KERN_INFO + "\tRCU dyntick-idle grace-period acceleration is enabled.\n"); +#endif +#ifdef CONFIG_PROVE_RCU + printk(KERN_INFO "\tRCU lockdep checking is enabled.\n"); +#endif +#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE + printk(KERN_INFO "\tRCU torture testing starts during boot.\n"); +#endif +#ifndef CONFIG_RCU_CPU_STALL_DETECTOR + printk(KERN_INFO + "\tRCU-based detection of stalled CPUs is disabled.\n"); +#endif +#ifndef CONFIG_RCU_CPU_STALL_VERBOSE + printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n"); +#endif +#if NUM_RCU_LVL_4 != 0 + printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n"); +#endif +} + #ifdef CONFIG_TREE_PREEMPT_RCU struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); @@ -38,8 +77,8 @@ static int rcu_preempted_readers_exp(str */ static void __init rcu_bootup_announce(void) { - printk(KERN_INFO - "Experimental preemptable hierarchical RCU implementation.\n"); + printk(KERN_INFO "Preemptable hierarchical RCU implementation.\n"); + rcu_bootup_announce_oddness(); } /* @@ -75,13 +114,19 @@ EXPORT_SYMBOL_GPL(rcu_force_quiescent_st * that this just means that the task currently running on the CPU is * not in a quiescent state. There might be any number of tasks blocked * while in an RCU read-side critical section. + * + * Unlike the other rcu_*_qs() functions, callers to this function + * must disable irqs in order to protect the assignment to + * ->rcu_read_unlock_special. */ static void rcu_preempt_qs(int cpu) { struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); + rdp->passed_quiesc_completed = rdp->gpnum - 1; barrier(); rdp->passed_quiesc = 1; + current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; } /* @@ -144,9 +189,8 @@ static void rcu_preempt_note_context_swi * grace period, then the fact that the task has been enqueued * means that we continue to block the current grace period. */ - rcu_preempt_qs(cpu); local_irq_save(flags); - t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; + rcu_preempt_qs(cpu); local_irq_restore(flags); } @@ -236,7 +280,6 @@ static void rcu_read_unlock_special(stru */ special = t->rcu_read_unlock_special; if (special & RCU_READ_UNLOCK_NEED_QS) { - t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; rcu_preempt_qs(smp_processor_id()); } @@ -473,7 +516,6 @@ static void rcu_preempt_check_callbacks( struct task_struct *t = current; if (t->rcu_read_lock_nesting == 0) { - t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; rcu_preempt_qs(cpu); return; } @@ -754,6 +796,7 @@ void exit_rcu(void) static void __init rcu_bootup_announce(void) { printk(KERN_INFO "Hierarchical RCU implementation.\n"); + rcu_bootup_announce_oddness(); } /* @@ -1016,7 +1059,7 @@ int rcu_needs_cpu(int cpu) /* Don't bother unless we are the last non-dyntick-idle CPU. 
diff -urpNa -X dontdiff linux-2.6.34-rc4/kernel/rcutree_trace.c linux-2.6.34-rc4-bc293d62-rcu/kernel/rcutree_trace.c
--- linux-2.6.34-rc4/kernel/rcutree_trace.c	2010-04-19 14:45:58.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/kernel/rcutree_trace.c	2010-04-19 16:27:46.000000000 -0700
@@ -241,11 +241,13 @@ static const struct file_operations rcug
 static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp)
 {
 	seq_printf(m, "%3d%cnp=%ld "
-		   "qsp=%ld cbr=%ld cng=%ld gpc=%ld gps=%ld nf=%ld nn=%ld\n",
+		   "qsp=%ld rpq=%ld cbr=%ld cng=%ld "
+		   "gpc=%ld gps=%ld nf=%ld nn=%ld\n",
 		   rdp->cpu,
 		   cpu_is_offline(rdp->cpu) ? '!' : ' ',
 		   rdp->n_rcu_pending,
 		   rdp->n_rp_qs_pending,
+		   rdp->n_rp_report_qs,
 		   rdp->n_rp_cb_ready,
 		   rdp->n_rp_cpu_needs_gp,
 		   rdp->n_rp_gp_completed,
diff -urpNa -X dontdiff linux-2.6.34-rc4/kernel/sched.c linux-2.6.34-rc4-bc293d62-rcu/kernel/sched.c
--- linux-2.6.34-rc4/kernel/sched.c	2010-04-19 14:45:58.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/kernel/sched.c	2010-04-19 16:27:46.000000000 -0700
@@ -3696,7 +3696,7 @@ need_resched:
 	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
-	rcu_sched_qs(cpu);
+	rcu_note_context_switch(cpu);
 	prev = rq->curr;
 	switch_count = &prev->nivcsw;
diff -urpNa -X dontdiff linux-2.6.34-rc4/kernel/softirq.c linux-2.6.34-rc4-bc293d62-rcu/kernel/softirq.c
--- linux-2.6.34-rc4/kernel/softirq.c	2010-02-24 10:52:17.000000000 -0800
+++ linux-2.6.34-rc4-bc293d62-rcu/kernel/softirq.c	2010-04-19 16:27:46.000000000 -0700
@@ -716,7 +716,7 @@ static int run_ksoftirqd(void * __bind_c
 			preempt_enable_no_resched();
 			cond_resched();
 			preempt_disable();
-			rcu_sched_qs((long)__bind_cpu);
+			rcu_note_context_switch((long)__bind_cpu);
 		}
 		preempt_enable();
 		set_current_state(TASK_INTERRUPTIBLE);
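The two hunks above replace direct calls to rcu_sched_qs() with rcu_note_context_switch(), so that preemptible RCU also learns about context switches. A sketch of what such a wrapper amounts to (the body shown is an assumption based on the rename, not a hunk from this patch):

	/* Sketch: a context switch is a quiescent state for RCU-sched and
	 * must also be noted by preemptible RCU, which may need to queue
	 * the outgoing task. */
	void rcu_note_context_switch(int cpu)
	{
		rcu_sched_qs(cpu);
		rcu_preempt_note_context_switch(cpu);
	}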
diff -urpNa -X dontdiff linux-2.6.34-rc4/lib/debugobjects.c linux-2.6.34-rc4-bc293d62-rcu/lib/debugobjects.c
--- linux-2.6.34-rc4/lib/debugobjects.c	2010-04-19 14:45:58.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/lib/debugobjects.c	2010-04-19 16:27:46.000000000 -0700
@@ -141,6 +141,7 @@ alloc_object(void *addr, struct debug_bu
 	obj->object = addr;
 	obj->descr  = descr;
 	obj->state  = ODEBUG_STATE_NONE;
+	obj->astate = 0;
 	hlist_del(&obj->node);
 
 	hlist_add_head(&obj->node, &b->list);
@@ -252,8 +253,10 @@ static void debug_print_object(struct de
 
 	if (limit < 5 && obj->descr != descr_test) {
 		limit++;
-		WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
-		     obj_states[obj->state], obj->descr->name);
+		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
+		     "object type: %s\n",
+		     msg, obj_states[obj->state], obj->astate,
+		     obj->descr->name);
 	}
 	debug_objects_warnings++;
 }
@@ -447,7 +450,10 @@ void debug_object_deactivate(void *addr,
 	case ODEBUG_STATE_INIT:
 	case ODEBUG_STATE_INACTIVE:
 	case ODEBUG_STATE_ACTIVE:
-		obj->state = ODEBUG_STATE_INACTIVE;
+		if (!obj->astate)
+			obj->state = ODEBUG_STATE_INACTIVE;
+		else
+			debug_print_object(obj, "deactivate");
 		break;
 
 	case ODEBUG_STATE_DESTROYED:
@@ -553,6 +559,53 @@ out_unlock:
 	raw_spin_unlock_irqrestore(&db->lock, flags);
 }
 
+/**
+ * debug_object_active_state - debug checks object usage state machine
+ * @addr:	address of the object
+ * @descr:	pointer to an object specific debug description structure
+ * @expect:	expected state
+ * @next:	state to move to if expected state is found
+ */
+void
+debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+			  unsigned int expect, unsigned int next)
+{
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+
+	if (!debug_objects_enabled)
+		return;
+
+	db = get_bucket((unsigned long) addr);
+
+	raw_spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (obj) {
+		switch (obj->state) {
+		case ODEBUG_STATE_ACTIVE:
+			if (obj->astate == expect)
+				obj->astate = next;
+			else
+				debug_print_object(obj, "active_state");
+			break;
+
+		default:
+			debug_print_object(obj, "active_state");
+			break;
+		}
+	} else {
+		struct debug_obj o = { .object = addr,
+				       .state = ODEBUG_STATE_NOTAVAILABLE,
+				       .descr = descr };
+
+		debug_print_object(&o, "active_state");
+	}
+
+	raw_spin_unlock_irqrestore(&db->lock, flags);
+}
+
 #ifdef CONFIG_DEBUG_OBJECTS_FREE
 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 {
diff -urpNa -X dontdiff linux-2.6.34-rc4/lib/Kconfig.debug linux-2.6.34-rc4-bc293d62-rcu/lib/Kconfig.debug
--- linux-2.6.34-rc4/lib/Kconfig.debug	2010-04-19 14:45:58.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/lib/Kconfig.debug	2010-04-19 16:27:46.000000000 -0700
@@ -307,6 +307,12 @@ config DEBUG_OBJECTS_WORK
 	  work queue routines to track the life time of work objects and
 	  validate the work operations.
 
+config DEBUG_OBJECTS_RCU_HEAD
+	bool "Debug RCU callbacks objects"
+	depends on DEBUG_OBJECTS
+	help
+	  Enable this to turn on debugging of RCU list heads (call_rcu() usage).
+
 config DEBUG_OBJECTS_ENABLE_DEFAULT
 	int "debug_objects bootup default value (0-1)"
 	range 0 1
@@ -356,7 +362,7 @@ config SLUB_STATS
 config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
 	depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
-		(X86 || ARM || PPC || S390 || SUPERH || MICROBLAZE)
+		(X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE)
 
 	select DEBUG_FS if SYSFS
 	select STACKTRACE if STACKTRACE_SUPPORT
@@ -793,7 +799,7 @@ config RCU_CPU_STALL_DETECTOR
 config RCU_CPU_STALL_VERBOSE
 	bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
 	depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
-	default n
+	default y
 	help
 	  This option causes RCU to printk detailed per-task information
 	  for any tasks that are stalling the current RCU grace period.
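The new debug_object_active_state() interface above lets a subsystem enforce its own state machine while an object is ODEBUG_STATE_ACTIVE; the DEBUG_OBJECTS_RCU_HEAD option is its intended first user. An illustrative sketch of a caller (the descriptor and the two astate values are hypothetical, not part of this patch):

	#include <linux/debugobjects.h>

	#define EX_STATE_IDLE	0	/* hypothetical astate values */
	#define EX_STATE_QUEUED	1

	static struct debug_obj_descr example_descr = {
		.name = "example_object",
	};

	static void example_queue(void *obj)
	{
		/* Warn unless the object is ACTIVE with astate EX_STATE_IDLE;
		 * on success advance it to EX_STATE_QUEUED. */
		debug_object_active_state(obj, &example_descr,
					  EX_STATE_IDLE, EX_STATE_QUEUED);
	}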
diff -urpNa -X dontdiff linux-2.6.34-rc4/mm/backing-dev.c linux-2.6.34-rc4-bc293d62-rcu/mm/backing-dev.c
--- linux-2.6.34-rc4/mm/backing-dev.c	2010-04-19 14:45:58.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/mm/backing-dev.c	2010-04-19 16:27:46.000000000 -0700
@@ -656,7 +656,6 @@ int bdi_init(struct backing_dev_info *bd
 	bdi->max_ratio = 100;
 	bdi->max_prop_frac = PROP_FRAC_BASE;
 	spin_lock_init(&bdi->wb_lock);
-	INIT_RCU_HEAD(&bdi->rcu_head);
 	INIT_LIST_HEAD(&bdi->bdi_list);
 	INIT_LIST_HEAD(&bdi->wb_list);
 	INIT_LIST_HEAD(&bdi->work_list);
diff -urpNa -X dontdiff linux-2.6.34-rc4/mm/slob.c linux-2.6.34-rc4-bc293d62-rcu/mm/slob.c
--- linux-2.6.34-rc4/mm/slob.c	2010-02-24 10:52:17.000000000 -0800
+++ linux-2.6.34-rc4-bc293d62-rcu/mm/slob.c	2010-04-19 16:27:46.000000000 -0700
@@ -647,7 +647,6 @@ void kmem_cache_free(struct kmem_cache *
 	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
 		struct slob_rcu *slob_rcu;
 		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
-		INIT_RCU_HEAD(&slob_rcu->head);
 		slob_rcu->size = c->size;
 		call_rcu(&slob_rcu->head, kmem_rcu_free);
 	} else {
diff -urpNa -X dontdiff linux-2.6.34-rc4/net/bridge/br_multicast.c linux-2.6.34-rc4-bc293d62-rcu/net/bridge/br_multicast.c
--- linux-2.6.34-rc4/net/bridge/br_multicast.c	2010-04-19 14:45:59.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/net/bridge/br_multicast.c	2010-04-19 15:01:10.000000000 -0700
@@ -723,7 +723,7 @@ static int br_multicast_igmp3_report(str
 		if (!pskb_may_pull(skb, len))
 			return -EINVAL;
 
-		grec = (void *)(skb->data + len);
+		grec = (void *)(skb->data + len - sizeof(*grec));
 		group = grec->grec_mca;
 		type = grec->grec_type;
diff -urpNa -X dontdiff linux-2.6.34-rc4/net/can/raw.c linux-2.6.34-rc4-bc293d62-rcu/net/can/raw.c
--- linux-2.6.34-rc4/net/can/raw.c	2010-04-19 14:45:59.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/net/can/raw.c	2010-04-19 15:01:10.000000000 -0700
@@ -445,7 +445,7 @@ static int raw_setsockopt(struct socket
 				return -EFAULT;
 			}
 		} else if (count == 1) {
-			if (copy_from_user(&sfilter, optval, optlen))
+			if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
 				return -EFAULT;
 		}
diff -urpNa -X dontdiff linux-2.6.34-rc4/net/ipv4/udp.c linux-2.6.34-rc4-bc293d62-rcu/net/ipv4/udp.c
--- linux-2.6.34-rc4/net/ipv4/udp.c	2010-04-19 14:46:00.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/net/ipv4/udp.c	2010-04-19 15:01:10.000000000 -0700
@@ -472,8 +472,8 @@ static struct sock *__udp4_lib_lookup(st
 		if (hslot->count < hslot2->count)
 			goto begin;
 
-		result = udp4_lib_lookup2(net, INADDR_ANY, sport,
-					  daddr, hnum, dif,
+		result = udp4_lib_lookup2(net, saddr, sport,
+					  INADDR_ANY, hnum, dif,
 					  hslot2, slot2);
 	}
 	rcu_read_unlock();
diff -urpNa -X dontdiff linux-2.6.34-rc4/net/ipv6/udp.c linux-2.6.34-rc4-bc293d62-rcu/net/ipv6/udp.c
--- linux-2.6.34-rc4/net/ipv6/udp.c	2010-04-19 14:46:01.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/net/ipv6/udp.c	2010-04-19 15:01:10.000000000 -0700
@@ -259,8 +259,8 @@ static struct sock *__udp6_lib_lookup(st
 		if (hslot->count < hslot2->count)
 			goto begin;
 
-		result = udp6_lib_lookup2(net, &in6addr_any, sport,
-					  daddr, hnum, dif,
+		result = udp6_lib_lookup2(net, saddr, sport,
+					  &in6addr_any, hnum, dif,
 					  hslot2, slot2);
 	}
 	rcu_read_unlock();
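The can/raw hunk above is the usual fix for trusting a user-supplied optlen: bound the copy by the kernel structure's size. The general pattern, sketched here with an invented caller (the explicit length validation shown is an assumption about the surrounding code, which checks optlen separately):

	struct can_filter sfilter;	/* fixed-size kernel structure */

	if (optlen < sizeof(sfilter))
		return -EINVAL;
	if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
		return -EFAULT;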
diff -urpNa -X dontdiff linux-2.6.34-rc4/net/mac80211/main.c linux-2.6.34-rc4-bc293d62-rcu/net/mac80211/main.c
--- linux-2.6.34-rc4/net/mac80211/main.c	2010-04-19 14:46:01.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/net/mac80211/main.c	2010-04-19 15:01:10.000000000 -0700
@@ -225,11 +225,11 @@ void ieee80211_bss_info_change_notify(st
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_AP:
 		sdata->vif.bss_conf.enable_beacon =
-			!!rcu_dereference(sdata->u.ap.beacon);
+			!!sdata->u.ap.beacon;
 		break;
 	case NL80211_IFTYPE_ADHOC:
 		sdata->vif.bss_conf.enable_beacon =
-			!!rcu_dereference(sdata->u.ibss.presp);
+			!!sdata->u.ibss.presp;
 		break;
 	case NL80211_IFTYPE_MESH_POINT:
 		sdata->vif.bss_conf.enable_beacon = true;
diff -urpNa -X dontdiff linux-2.6.34-rc4/net/mac80211/mesh.c linux-2.6.34-rc4-bc293d62-rcu/net/mac80211/mesh.c
--- linux-2.6.34-rc4/net/mac80211/mesh.c	2010-04-19 14:46:01.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/net/mac80211/mesh.c	2010-04-19 15:01:10.000000000 -0700
@@ -750,9 +750,6 @@ ieee80211_mesh_rx_mgmt(struct ieee80211_
 
 	switch (fc & IEEE80211_FCTL_STYPE) {
 	case IEEE80211_STYPE_ACTION:
-		if (skb->len < IEEE80211_MIN_ACTION_SIZE)
-			return RX_DROP_MONITOR;
-		/* fall through */
 	case IEEE80211_STYPE_PROBE_RESP:
 	case IEEE80211_STYPE_BEACON:
 		skb_queue_tail(&ifmsh->skb_queue, skb);
diff -urpNa -X dontdiff linux-2.6.34-rc4/net/mac80211/rx.c linux-2.6.34-rc4-bc293d62-rcu/net/mac80211/rx.c
--- linux-2.6.34-rc4/net/mac80211/rx.c	2010-04-19 14:46:01.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/net/mac80211/rx.c	2010-04-19 15:01:10.000000000 -0700
@@ -1974,6 +1974,11 @@ ieee80211_rx_h_action(struct ieee80211_r
 			goto handled;
 		}
 		break;
+	case MESH_PLINK_CATEGORY:
+	case MESH_PATH_SEL_CATEGORY:
+		if (ieee80211_vif_is_mesh(&sdata->vif))
+			return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
+		break;
 	}
 
 	/*
diff -urpNa -X dontdiff linux-2.6.34-rc4/net/mac80211/sta_info.c linux-2.6.34-rc4-bc293d62-rcu/net/mac80211/sta_info.c
--- linux-2.6.34-rc4/net/mac80211/sta_info.c	2010-04-19 14:46:01.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/net/mac80211/sta_info.c	2010-04-19 15:01:10.000000000 -0700
@@ -93,12 +93,18 @@ struct sta_info *sta_info_get(struct iee
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
 
-	sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
+	sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
+				    rcu_read_lock_held() ||
+				    lockdep_is_held(&local->sta_lock) ||
+				    lockdep_is_held(&local->sta_mtx));
 	while (sta) {
 		if (sta->sdata == sdata &&
 		    memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
 			break;
-		sta = rcu_dereference(sta->hnext);
+		sta = rcu_dereference_check(sta->hnext,
+					    rcu_read_lock_held() ||
+					    lockdep_is_held(&local->sta_lock) ||
+					    lockdep_is_held(&local->sta_mtx));
 	}
 	return sta;
 }
@@ -113,13 +119,19 @@ struct sta_info *sta_info_get_bss(struct
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
 
-	sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
+	sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
+				    rcu_read_lock_held() ||
+				    lockdep_is_held(&local->sta_lock) ||
+				    lockdep_is_held(&local->sta_mtx));
 	while (sta) {
 		if ((sta->sdata == sdata ||
 		     sta->sdata->bss == sdata->bss) &&
 		    memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
 			break;
-		sta = rcu_dereference(sta->hnext);
+		sta = rcu_dereference_check(sta->hnext,
+					    rcu_read_lock_held() ||
+					    lockdep_is_held(&local->sta_lock) ||
+					    lockdep_is_held(&local->sta_mtx));
 	}
 	return sta;
 }
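The sta_info hunks above are representative of the RCU lockdep conversions in this patch: rcu_dereference_check() takes, in addition to the pointer, the conditions under which a non-RCU reader may legitimately dereference it. The generic shape, with hypothetical names (gp, my_lock, struct foo):

	/* Legal under rcu_read_lock() or while holding my_lock; anything
	 * else trips a CONFIG_PROVE_RCU splat. */
	struct foo *f;

	f = rcu_dereference_check(gp,
				  rcu_read_lock_held() ||
				  lockdep_is_held(&my_lock));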
diff -urpNa -X dontdiff linux-2.6.34-rc4/net/x25/af_x25.c linux-2.6.34-rc4-bc293d62-rcu/net/x25/af_x25.c
--- linux-2.6.34-rc4/net/x25/af_x25.c	2010-04-19 14:46:03.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/net/x25/af_x25.c	2010-04-19 15:01:10.000000000 -0700
@@ -83,6 +83,41 @@ struct compat_x25_subscrip_struct {
 };
 #endif
 
+
+int x25_parse_address_block(struct sk_buff *skb,
+		struct x25_address *called_addr,
+		struct x25_address *calling_addr)
+{
+	unsigned char len;
+	int needed;
+	int rc;
+
+	if (skb->len < 1) {
+		/* packet has no address block */
+		rc = 0;
+		goto empty;
+	}
+
+	len = *skb->data;
+	needed = 1 + (len >> 4) + (len & 0x0f);
+
+	if (skb->len < needed) {
+		/* packet is too short to hold the addresses it claims
+		   to hold */
+		rc = -1;
+		goto empty;
+	}
+
+	return x25_addr_ntoa(skb->data, called_addr, calling_addr);
+
+empty:
+	*called_addr->x25_addr = 0;
+	*calling_addr->x25_addr = 0;
+
+	return rc;
+}
+
+
 int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr,
 		  struct x25_address *calling_addr)
 {
@@ -554,7 +589,8 @@ static int x25_create(struct net *net, s
 	x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE;
 	x25->facilities.pacsize_in  = X25_DEFAULT_PACKET_SIZE;
 	x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE;
-	x25->facilities.throughput  = X25_DEFAULT_THROUGHPUT;
+	x25->facilities.throughput = 0;	/* by default don't negotiate
+					   throughput */
 	x25->facilities.reverse     = X25_DEFAULT_REVERSE;
 	x25->dte_facilities.calling_len = 0;
 	x25->dte_facilities.called_len = 0;
@@ -922,16 +958,26 @@ int x25_rx_call_request(struct sk_buff *
 	/*
 	 *	Extract the X.25 addresses and convert them to ASCII strings,
 	 *	and remove them.
+	 *
+	 *	Address block is mandatory in call request packets
 	 */
-	addr_len = x25_addr_ntoa(skb->data, &source_addr, &dest_addr);
+	addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr);
+	if (addr_len <= 0)
+		goto out_clear_request;
 	skb_pull(skb, addr_len);
 
 	/*
 	 *	Get the length of the facilities, skip past them for the moment
 	 *	get the call user data because this is needed to determine
 	 *	the correct listener
+	 *
+	 *	Facilities length is mandatory in call request packets
 	 */
+	if (skb->len < 1)
+		goto out_clear_request;
 	len = skb->data[0] + 1;
+	if (skb->len < len)
+		goto out_clear_request;
 	skb_pull(skb,len);
 
 	/*
@@ -1415,9 +1461,20 @@ static int x25_ioctl(struct socket *sock
 			if (facilities.winsize_in < 1 ||
 			    facilities.winsize_in > 127)
 				break;
-			if (facilities.throughput < 0x03 ||
-			    facilities.throughput > 0xDD)
-				break;
+			if (facilities.throughput) {
+				int out = facilities.throughput & 0xf0;
+				int in  = facilities.throughput & 0x0f;
+				if (!out)
+					facilities.throughput |=
+						X25_DEFAULT_THROUGHPUT << 4;
+				else if (out < 0x30 || out > 0xD0)
+					break;
+				if (!in)
+					facilities.throughput |=
+						X25_DEFAULT_THROUGHPUT;
+				else if (in < 0x03 || in > 0x0D)
+					break;
+			}
 			if (facilities.reverse &&
 			    (facilities.reverse & 0x81) != 0x81)
 				break;
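The throughput facility octet validated above packs two 4-bit rate codes: outbound in the high nibble (accepted range 0x30 to 0xD0 in place) and inbound in the low nibble (0x03 to 0x0D), with a zero nibble meaning "use X25_DEFAULT_THROUGHPUT". A hypothetical helper mirroring the x25_ioctl() checks, for illustration only:

	static int x25_check_throughput(unsigned int throughput)
	{
		unsigned int out = throughput & 0xf0;	/* outbound code */
		unsigned int in  = throughput & 0x0f;	/* inbound code */

		if (out && (out < 0x30 || out > 0xD0))
			return -EINVAL;
		if (in && (in < 0x03 || in > 0x0D))
			return -EINVAL;
		return 0;	/* zero nibbles take the default */
	}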
diff -urpNa -X dontdiff linux-2.6.34-rc4/net/x25/x25_facilities.c linux-2.6.34-rc4-bc293d62-rcu/net/x25/x25_facilities.c
--- linux-2.6.34-rc4/net/x25/x25_facilities.c	2010-02-24 10:52:17.000000000 -0800
+++ linux-2.6.34-rc4-bc293d62-rcu/net/x25/x25_facilities.c	2010-04-19 15:01:10.000000000 -0700
@@ -35,7 +35,7 @@ int x25_parse_facilities(struct sk_buff
 	struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
 {
 	unsigned char *p = skb->data;
-	unsigned int len = *p++;
+	unsigned int len;
 
 	*vc_fac_mask = 0;
 
@@ -50,6 +50,14 @@ int x25_parse_facilities(struct sk_buff
 	memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae));
 	memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae));
 
+	if (skb->len < 1)
+		return 0;
+
+	len = *p++;
+
+	if (len >= skb->len)
+		return -1;
+
 	while (len > 0) {
 		switch (*p & X25_FAC_CLASS_MASK) {
 		case X25_FAC_CLASS_A:
@@ -247,6 +255,8 @@ int x25_negotiate_facilities(struct sk_b
 	memcpy(new, ours, sizeof(*new));
 
 	len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
+	if (len < 0)
+		return len;
 
 	/*
 	 *	They want reverse charging, we won't accept it.
@@ -259,9 +269,18 @@ int x25_negotiate_facilities(struct sk_b
 	new->reverse = theirs.reverse;
 
 	if (theirs.throughput) {
-		if (theirs.throughput < ours->throughput) {
-			SOCK_DEBUG(sk, "X.25: throughput negotiated down\n");
-			new->throughput = theirs.throughput;
+		int theirs_in  = theirs.throughput & 0x0f;
+		int theirs_out = theirs.throughput & 0xf0;
+		int ours_in  = ours->throughput & 0x0f;
+		int ours_out = ours->throughput & 0xf0;
+		if (!ours_in || theirs_in < ours_in) {
+			SOCK_DEBUG(sk, "X.25: inbound throughput negotiated\n");
+			new->throughput = (new->throughput & 0xf0) | theirs_in;
+		}
+		if (!ours_out || theirs_out < ours_out) {
+			SOCK_DEBUG(sk,
+				"X.25: outbound throughput negotiated\n");
+			new->throughput = (new->throughput & 0x0f) | theirs_out;
 		}
 	}
diff -urpNa -X dontdiff linux-2.6.34-rc4/net/x25/x25_in.c linux-2.6.34-rc4-bc293d62-rcu/net/x25/x25_in.c
--- linux-2.6.34-rc4/net/x25/x25_in.c	2010-04-19 14:46:03.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/net/x25/x25_in.c	2010-04-19 15:01:10.000000000 -0700
@@ -90,6 +90,7 @@ static int x25_queue_rx_frame(struct soc
 static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
 {
 	struct x25_address source_addr, dest_addr;
+	int len;
 
 	switch (frametype) {
 	case X25_CALL_ACCEPTED: {
@@ -107,11 +108,17 @@ static int x25_state1_machine(struct soc
 		 *	Parse the data in the frame.
 		 */
 		skb_pull(skb, X25_STD_MIN_LEN);
-		skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr));
-		skb_pull(skb,
-			 x25_parse_facilities(skb, &x25->facilities,
+
+		len = x25_parse_address_block(skb, &source_addr,
+					      &dest_addr);
+		if (len > 0)
+			skb_pull(skb, len);
+
+		len = x25_parse_facilities(skb, &x25->facilities,
 					&x25->dte_facilities,
-					&x25->vc_facil_mask));
+					&x25->vc_facil_mask);
+		if (len > 0)
+			skb_pull(skb, len);
 		/*
 		 *	Copy any Call User Data.
 		 */
diff -urpNa -X dontdiff linux-2.6.34-rc4/security/selinux/avc.c linux-2.6.34-rc4-bc293d62-rcu/security/selinux/avc.c
--- linux-2.6.34-rc4/security/selinux/avc.c	2010-04-19 14:46:03.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/security/selinux/avc.c	2010-04-19 16:27:46.000000000 -0700
@@ -288,7 +288,6 @@ static struct avc_node *avc_alloc_node(v
 	if (!node)
 		goto out;
 
-	INIT_RCU_HEAD(&node->rhead);
 	INIT_HLIST_NODE(&node->list);
 	avc_cache_stats_incr(allocations);
 
diff -urpNa -X dontdiff linux-2.6.34-rc4/security/selinux/netnode.c linux-2.6.34-rc4-bc293d62-rcu/security/selinux/netnode.c
--- linux-2.6.34-rc4/security/selinux/netnode.c	2010-04-19 14:46:03.000000000 -0700
+++ linux-2.6.34-rc4-bc293d62-rcu/security/selinux/netnode.c	2010-04-19 16:27:46.000000000 -0700
@@ -183,8 +183,6 @@ static void sel_netnode_insert(struct se
 		BUG();
 	}
 
-	INIT_RCU_HEAD(&node->rcu);
-
 	/* we need to impose a limit on the growth of the hash table so check
 	 * this bucket to make sure it is within the specified bounds */
 	list_add_rcu(&node->list, &sel_netnode_hash[idx].list);
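The INIT_RCU_HEAD() deletions in this patch (mm/backing-dev.c, mm/slob.c, and the two SELinux files above) all follow the same rule: call_rcu() needs no prior initialization of the rcu_head, and with DEBUG_OBJECTS_RCU_HEAD the head's lifetime can be tracked by debugobjects instead. The resulting pattern, sketched with invented names (struct example, example_free_rcu):

	struct example {
		struct rcu_head rcu;
		int payload;
	};

	static void example_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct example, rcu));
	}

	static void example_release(struct example *p)
	{
		/* No INIT_RCU_HEAD() beforehand. */
		call_rcu(&p->rcu, example_free_rcu);
	}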