diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/ia64/kvm/kvm-ia64.c linux-2.6.34-rc5-4b402210-rcu/arch/ia64/kvm/kvm-ia64.c --- linux-2.6.34-rc5/arch/ia64/kvm/kvm-ia64.c 2010-04-23 10:39:24.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/arch/ia64/kvm/kvm-ia64.c 2010-04-23 10:42:23.000000000 -0700 @@ -1802,7 +1802,8 @@ static int kvm_ia64_sync_dirty_log(struc { struct kvm_memory_slot *memslot; int r, i; - long n, base; + long base; + unsigned long n; unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base + offsetof(struct kvm_vm_data, kvm_mem_dirty_log)); @@ -1815,7 +1816,7 @@ static int kvm_ia64_sync_dirty_log(struc if (!memslot->dirty_bitmap) goto out; - n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; + n = kvm_dirty_bitmap_bytes(memslot); base = memslot->base_gfn / BITS_PER_LONG; for (i = 0; i < n/sizeof(long); ++i) { @@ -1831,7 +1832,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kv struct kvm_dirty_log *log) { int r; - int n; + unsigned long n; struct kvm_memory_slot *memslot; int is_dirty = 0; @@ -1850,7 +1851,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kv if (is_dirty) { kvm_flush_remote_tlbs(kvm); memslot = &kvm->memslots->memslots[log->slot]; - n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; + n = kvm_dirty_bitmap_bytes(memslot); memset(memslot->dirty_bitmap, 0, n); } r = 0; diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/m68knommu/kernel/entry.S linux-2.6.34-rc5-4b402210-rcu/arch/m68knommu/kernel/entry.S --- linux-2.6.34-rc5/arch/m68knommu/kernel/entry.S 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/arch/m68knommu/kernel/entry.S 2010-04-23 10:42:23.000000000 -0700 @@ -145,6 +145,6 @@ ENTRY(ret_from_user_signal) trap #0 ENTRY(ret_from_user_rt_signal) - move #__NR_rt_sigreturn,%d0 + movel #__NR_rt_sigreturn,%d0 trap #0 diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/m68knommu/Makefile linux-2.6.34-rc5-4b402210-rcu/arch/m68knommu/Makefile --- linux-2.6.34-rc5/arch/m68knommu/Makefile 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/arch/m68knommu/Makefile 2010-04-23 10:42:23.000000000 -0700 @@ -94,7 +94,7 @@ cflags-$(CONFIG_M520x) := $(call cc-opt cflags-$(CONFIG_M523x) := $(call cc-option,-mcpu=523x,-m5307) cflags-$(CONFIG_M5249) := $(call cc-option,-mcpu=5249,-m5200) cflags-$(CONFIG_M5271) := $(call cc-option,-mcpu=5271,-m5307) -cflags-$(CONFIG_M5272) := $(call cc-option,-mcpu=5271,-m5200) +cflags-$(CONFIG_M5272) := $(call cc-option,-mcpu=5272,-m5307) cflags-$(CONFIG_M5275) := $(call cc-option,-mcpu=5275,-m5307) cflags-$(CONFIG_M528x) := $(call cc-option,-m528x,-m5307) cflags-$(CONFIG_M5307) := $(call cc-option,-m5307,-m5200) diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/m68knommu/platform/68360/ints.c linux-2.6.34-rc5-4b402210-rcu/arch/m68knommu/platform/68360/ints.c --- linux-2.6.34-rc5/arch/m68knommu/platform/68360/ints.c 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/arch/m68knommu/platform/68360/ints.c 2010-04-23 10:42:23.000000000 -0700 @@ -107,7 +107,6 @@ void init_IRQ(void) _ramvec[vba+CPMVEC_PIO_PC7] = inthandler; /* pio - pc7 */ _ramvec[vba+CPMVEC_PIO_PC6] = inthandler; /* pio - pc6 */ _ramvec[vba+CPMVEC_TIMER3] = inthandler; /* timer 3 */ - _ramvec[vba+CPMVEC_RISCTIMER] = inthandler; /* reserved */ _ramvec[vba+CPMVEC_PIO_PC5] = inthandler; /* pio - pc5 */ _ramvec[vba+CPMVEC_PIO_PC4] = inthandler; /* pio - pc4 */ _ramvec[vba+CPMVEC_RESERVED2] = inthandler; /* reserved */ diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/powerpc/kvm/book3s.c linux-2.6.34-rc5-4b402210-rcu/arch/powerpc/kvm/book3s.c --- 
linux-2.6.34-rc5/arch/powerpc/kvm/book3s.c 2010-04-23 10:39:27.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/arch/powerpc/kvm/book3s.c 2010-04-23 10:42:23.000000000 -0700 @@ -1004,7 +1004,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kv struct kvm_vcpu *vcpu; ulong ga, ga_end; int is_dirty = 0; - int r, n; + int r; + unsigned long n; mutex_lock(&kvm->slots_lock); @@ -1022,7 +1023,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kv kvm_for_each_vcpu(n, vcpu, kvm) kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); - n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; + n = kvm_dirty_bitmap_bytes(memslot); memset(memslot->dirty_bitmap, 0, n); } diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/s390/kernel/asm-offsets.c linux-2.6.34-rc5-4b402210-rcu/arch/s390/kernel/asm-offsets.c --- linux-2.6.34-rc5/arch/s390/kernel/asm-offsets.c 2010-04-23 10:39:28.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/arch/s390/kernel/asm-offsets.c 2010-04-23 10:42:23.000000000 -0700 @@ -61,6 +61,7 @@ int main(void) DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); + DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult)); DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base)); DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time)); /* constants used by the vdso */ diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/s390/kernel/swsusp_asm64.S linux-2.6.34-rc5-4b402210-rcu/arch/s390/kernel/swsusp_asm64.S --- linux-2.6.34-rc5/arch/s390/kernel/swsusp_asm64.S 2010-04-23 10:39:28.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/arch/s390/kernel/swsusp_asm64.S 2010-04-23 10:42:23.000000000 -0700 @@ -256,6 +256,9 @@ restore_registers: lghi %r2,0 brasl %r14,arch_set_page_states + /* Reinitialize the channel subsystem */ + brasl %r14,channel_subsystem_reinit + /* Return 0 */ lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) lghi %r2,0 diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/s390/kernel/time.c linux-2.6.34-rc5-4b402210-rcu/arch/s390/kernel/time.c --- linux-2.6.34-rc5/arch/s390/kernel/time.c 2010-04-23 10:39:28.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/arch/s390/kernel/time.c 2010-04-23 10:42:23.000000000 -0700 @@ -221,6 +221,7 @@ void update_vsyscall(struct timespec *wa vdso_data->xtime_clock_nsec = wall_time->tv_nsec; vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; + vdso_data->ntp_mult = mult; smp_wmb(); ++vdso_data->tb_update_count; } diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/s390/kernel/vdso32/clock_gettime.S linux-2.6.34-rc5-4b402210-rcu/arch/s390/kernel/vdso32/clock_gettime.S --- linux-2.6.34-rc5/arch/s390/kernel/vdso32/clock_gettime.S 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/arch/s390/kernel/vdso32/clock_gettime.S 2010-04-23 10:42:23.000000000 -0700 @@ -38,13 +38,13 @@ __kernel_clock_gettime: sl %r1,__VDSO_XTIME_STAMP+4(%r5) brc 3,2f ahi %r0,-1 -2: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ +2: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ lr %r2,%r0 - lhi %r0,1000 + l %r0,__VDSO_NTP_MULT(%r5) ltr %r1,%r1 mr %r0,%r0 jnm 3f - ahi %r0,1000 + a %r0,__VDSO_NTP_MULT(%r5) 3: alr %r0,%r2 srdl %r0,12 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ @@ -86,13 +86,13 @@ __kernel_clock_gettime: sl %r1,__VDSO_XTIME_STAMP+4(%r5) brc 3,12f ahi %r0,-1 -12: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ +12: ms %r0,__VDSO_NTP_MULT(%r5) 
/* cyc2ns(clock,cycle_delta) */ lr %r2,%r0 - lhi %r0,1000 + l %r0,__VDSO_NTP_MULT(%r5) ltr %r1,%r1 mr %r0,%r0 jnm 13f - ahi %r0,1000 + a %r0,__VDSO_NTP_MULT(%r5) 13: alr %r0,%r2 srdl %r0,12 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/s390/kernel/vdso32/gettimeofday.S linux-2.6.34-rc5-4b402210-rcu/arch/s390/kernel/vdso32/gettimeofday.S --- linux-2.6.34-rc5/arch/s390/kernel/vdso32/gettimeofday.S 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/arch/s390/kernel/vdso32/gettimeofday.S 2010-04-23 10:42:23.000000000 -0700 @@ -35,13 +35,13 @@ __kernel_gettimeofday: sl %r1,__VDSO_XTIME_STAMP+4(%r5) brc 3,3f ahi %r0,-1 -3: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ +3: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ st %r0,24(%r15) - lhi %r0,1000 + l %r0,__VDSO_NTP_MULT(%r5) ltr %r1,%r1 mr %r0,%r0 jnm 4f - ahi %r0,1000 + a %r0,__VDSO_NTP_MULT(%r5) 4: al %r0,24(%r15) srdl %r0,12 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/s390/kernel/vdso64/clock_gettime.S linux-2.6.34-rc5-4b402210-rcu/arch/s390/kernel/vdso64/clock_gettime.S --- linux-2.6.34-rc5/arch/s390/kernel/vdso64/clock_gettime.S 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/arch/s390/kernel/vdso64/clock_gettime.S 2010-04-23 10:42:23.000000000 -0700 @@ -36,7 +36,7 @@ __kernel_clock_gettime: stck 48(%r15) /* Store TOD clock */ lg %r1,48(%r15) sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ - mghi %r1,1000 + msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ lg %r0,__VDSO_XTIME_SEC(%r5) @@ -64,7 +64,7 @@ __kernel_clock_gettime: stck 48(%r15) /* Store TOD clock */ lg %r1,48(%r15) sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ - mghi %r1,1000 + msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ lg %r0,__VDSO_XTIME_SEC(%r5) diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/s390/kernel/vdso64/gettimeofday.S linux-2.6.34-rc5-4b402210-rcu/arch/s390/kernel/vdso64/gettimeofday.S --- linux-2.6.34-rc5/arch/s390/kernel/vdso64/gettimeofday.S 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/arch/s390/kernel/vdso64/gettimeofday.S 2010-04-23 10:42:23.000000000 -0700 @@ -31,7 +31,7 @@ __kernel_gettimeofday: stck 48(%r15) /* Store TOD clock */ lg %r1,48(%r15) sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ - mghi %r1,1000 + msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */ lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */ diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/sparc/kernel/irq_64.c linux-2.6.34-rc5-4b402210-rcu/arch/sparc/kernel/irq_64.c --- linux-2.6.34-rc5/arch/sparc/kernel/irq_64.c 2010-04-23 10:39:29.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/arch/sparc/kernel/irq_64.c 2010-04-23 10:42:23.000000000 -0700 @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -46,6 +47,7 @@ #include "entry.h" #include "cpumap.h" +#include "kstack.h" #define NUM_IVECS (IMAP_INR + 1) @@ -712,24 +714,6 @@ void ack_bad_irq(unsigned int virt_irq) void *hardirq_stack[NR_CPUS]; void *softirq_stack[NR_CPUS]; -static __attribute__((always_inline)) void *set_hardirq_stack(void) -{ - void *orig_sp, *sp = hardirq_stack[smp_processor_id()]; - - __asm__ __volatile__("mov %%sp, %0" : "=r" 
(orig_sp)); - if (orig_sp < sp || - orig_sp > (sp + THREAD_SIZE)) { - sp += THREAD_SIZE - 192 - STACK_BIAS; - __asm__ __volatile__("mov %0, %%sp" : : "r" (sp)); - } - - return orig_sp; -} -static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp) -{ - __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp)); -} - void __irq_entry handler_irq(int irq, struct pt_regs *regs) { unsigned long pstate, bucket_pa; diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/sparc/kernel/kstack.h linux-2.6.34-rc5-4b402210-rcu/arch/sparc/kernel/kstack.h --- linux-2.6.34-rc5/arch/sparc/kernel/kstack.h 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/arch/sparc/kernel/kstack.h 2010-04-23 10:42:23.000000000 -0700 @@ -61,4 +61,23 @@ check_magic: } +static inline __attribute__((always_inline)) void *set_hardirq_stack(void) +{ + void *orig_sp, *sp = hardirq_stack[smp_processor_id()]; + + __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp)); + if (orig_sp < sp || + orig_sp > (sp + THREAD_SIZE)) { + sp += THREAD_SIZE - 192 - STACK_BIAS; + __asm__ __volatile__("mov %0, %%sp" : : "r" (sp)); + } + + return orig_sp; +} + +static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp) +{ + __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp)); +} + #endif /* _KSTACK_H */ diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/sparc/kernel/nmi.c linux-2.6.34-rc5-4b402210-rcu/arch/sparc/kernel/nmi.c --- linux-2.6.34-rc5/arch/sparc/kernel/nmi.c 2010-04-23 10:39:29.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/arch/sparc/kernel/nmi.c 2010-04-23 10:42:23.000000000 -0700 @@ -23,6 +23,8 @@ #include #include +#include "kstack.h" + /* We don't have a real NMI on sparc64, but we can fake one * up using profiling counter overflow interrupts and interrupt * levels. @@ -92,6 +94,7 @@ static void die_nmi(const char *str, str notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) { unsigned int sum, touched = 0; + void *orig_sp; clear_softint(1 << irq); @@ -99,6 +102,8 @@ notrace __kprobes void perfctr_irq(int i nmi_enter(); + orig_sp = set_hardirq_stack(); + if (notify_die(DIE_NMI, "nmi", regs, 0, pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) touched = 1; @@ -124,6 +129,8 @@ notrace __kprobes void perfctr_irq(int i pcr_ops->write(pcr_enable); } + restore_hardirq_stack(orig_sp); + nmi_exit(); } diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/sparc/kernel/rtrap_64.S linux-2.6.34-rc5-4b402210-rcu/arch/sparc/kernel/rtrap_64.S --- linux-2.6.34-rc5/arch/sparc/kernel/rtrap_64.S 2010-04-23 10:39:29.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/arch/sparc/kernel/rtrap_64.S 2010-04-23 10:42:23.000000000 -0700 @@ -130,7 +130,17 @@ rtrap_xcall: nop call trace_hardirqs_on nop - wrpr %l4, %pil + /* Do not actually set the %pil here. We will do that + * below after we clear PSTATE_IE in the %pstate register. + * If we re-enable interrupts here, we can recurse down + * the hardirq stack potentially endlessly, causing a + * stack overflow. + * + * It is tempting to put this test and trace_hardirqs_on + * call at the 'rt_continue' label, but that will not work + * as that path hits unconditionally and we do not want to + * execute this in NMI return paths, for example. 
+ */ #endif rtrap_no_irq_enable: andcc %l1, TSTATE_PRIV, %l3 diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/sparc/kernel/unaligned_64.c linux-2.6.34-rc5-4b402210-rcu/arch/sparc/kernel/unaligned_64.c --- linux-2.6.34-rc5/arch/sparc/kernel/unaligned_64.c 2010-04-23 10:39:29.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/arch/sparc/kernel/unaligned_64.c 2010-04-23 10:42:23.000000000 -0700 @@ -50,7 +50,7 @@ static inline enum direction decode_dire } /* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */ -static inline int decode_access_size(unsigned int insn) +static inline int decode_access_size(struct pt_regs *regs, unsigned int insn) { unsigned int tmp; @@ -66,7 +66,7 @@ static inline int decode_access_size(uns return 2; else { printk("Impossible unaligned trap. insn=%08x\n", insn); - die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs); + die_if_kernel("Byte sized unaligned access?!?!", regs); /* GCC should never warn that control reaches the end * of this function without returning a value because @@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) { enum direction dir = decode_direction(insn); - int size = decode_access_size(insn); + int size = decode_access_size(regs, insn); int orig_asi, asi; current_thread_info()->kern_una_regs = regs; diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/sparc/lib/mcount.S linux-2.6.34-rc5-4b402210-rcu/arch/sparc/lib/mcount.S --- linux-2.6.34-rc5/arch/sparc/lib/mcount.S 2010-04-23 10:39:29.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/arch/sparc/lib/mcount.S 2010-04-23 10:42:23.000000000 -0700 @@ -34,7 +34,7 @@ mcount: cmp %g1, %g2 be,pn %icc, 1f mov %i7, %g3 - save %sp, -128, %sp + save %sp, -176, %sp mov %g3, %o1 jmpl %g1, %o7 mov %i7, %o0 @@ -56,7 +56,7 @@ mcount: nop 5: mov %i7, %g2 mov %fp, %g3 - save %sp, -128, %sp + save %sp, -176, %sp mov %g2, %l0 ba,pt %xcc, ftrace_graph_caller mov %g3, %l1 @@ -85,7 +85,7 @@ ftrace_caller: lduw [%g1 + %lo(function_trace_stop)], %g1 brnz,pn %g1, ftrace_stub mov %fp, %g3 - save %sp, -128, %sp + save %sp, -176, %sp mov %g2, %o1 mov %g2, %l0 mov %g3, %l1 @@ -120,7 +120,7 @@ ENTRY(ftrace_graph_caller) END(ftrace_graph_caller) ENTRY(return_to_handler) - save %sp, -128, %sp + save %sp, -176, %sp call ftrace_return_to_handler mov %fp, %o0 jmpl %o0 + 8, %g0 diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/um/drivers/line.c linux-2.6.34-rc5-4b402210-rcu/arch/um/drivers/line.c --- linux-2.6.34-rc5/arch/um/drivers/line.c 2010-04-23 10:39:29.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/arch/um/drivers/line.c 2010-04-23 10:42:23.000000000 -0700 @@ -6,6 +6,7 @@ #include "linux/irqreturn.h" #include "linux/kd.h" #include "linux/sched.h" +#include "linux/slab.h" #include "chan_kern.h" #include "irq_kern.h" #include "irq_user.h" diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/um/os-Linux/helper.c linux-2.6.34-rc5-4b402210-rcu/arch/um/os-Linux/helper.c --- linux-2.6.34-rc5/arch/um/os-Linux/helper.c 2010-04-23 10:39:29.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/arch/um/os-Linux/helper.c 2010-04-23 10:42:23.000000000 -0700 @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include "kern_constants.h" diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/x86/ia32/ia32entry.S linux-2.6.34-rc5-4b402210-rcu/arch/x86/ia32/ia32entry.S --- linux-2.6.34-rc5/arch/x86/ia32/ia32entry.S 2010-04-23 10:39:29.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/arch/x86/ia32/ia32entry.S 2010-04-23 
10:42:23.000000000 -0700 @@ -626,7 +626,7 @@ ia32_sys_call_table: .quad stub32_sigreturn .quad stub32_clone /* 120 */ .quad sys_setdomainname - .quad sys_uname + .quad sys_newuname .quad sys_modify_ldt .quad compat_sys_adjtimex .quad sys32_mprotect /* 125 */ diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/x86/kernel/dumpstack.h linux-2.6.34-rc5-4b402210-rcu/arch/x86/kernel/dumpstack.h --- linux-2.6.34-rc5/arch/x86/kernel/dumpstack.h 2010-04-23 10:39:30.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/arch/x86/kernel/dumpstack.h 2010-04-23 10:42:23.000000000 -0700 @@ -14,6 +14,8 @@ #define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :) #endif +#include + extern void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, unsigned long bp, char *log_lvl); @@ -42,8 +44,10 @@ static inline unsigned long rewind_frame get_bp(frame); #ifdef CONFIG_FRAME_POINTER - while (n--) - frame = frame->next_frame; + while (n--) { + if (probe_kernel_address(&frame->next_frame, frame)) + break; + } #endif return (unsigned long)frame; diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/x86/kvm/mmu.c linux-2.6.34-rc5-4b402210-rcu/arch/x86/kvm/mmu.c --- linux-2.6.34-rc5/arch/x86/kvm/mmu.c 2010-04-23 10:39:30.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/arch/x86/kvm/mmu.c 2010-04-23 10:42:23.000000000 -0700 @@ -1490,8 +1490,8 @@ static int mmu_zap_unsync_children(struc for_each_sp(pages, sp, parents, i) { kvm_mmu_zap_page(kvm, sp); mmu_pages_clear_parents(&parents); + zapped++; } - zapped += pages.nr; kvm_mmu_pages_init(parent, &parents, &pages); } @@ -1542,14 +1542,16 @@ void kvm_mmu_change_mmu_pages(struct kvm */ if (used_pages > kvm_nr_mmu_pages) { - while (used_pages > kvm_nr_mmu_pages) { + while (used_pages > kvm_nr_mmu_pages && + !list_empty(&kvm->arch.active_mmu_pages)) { struct kvm_mmu_page *page; page = container_of(kvm->arch.active_mmu_pages.prev, struct kvm_mmu_page, link); - kvm_mmu_zap_page(kvm, page); + used_pages -= kvm_mmu_zap_page(kvm, page); used_pages--; } + kvm_nr_mmu_pages = used_pages; kvm->arch.n_free_mmu_pages = 0; } else @@ -1596,7 +1598,8 @@ static void mmu_unshadow(struct kvm *kvm && !sp->role.invalid) { pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word); - kvm_mmu_zap_page(kvm, sp); + if (kvm_mmu_zap_page(kvm, sp)) + nn = bucket->first; } } } diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/x86/kvm/svm.c linux-2.6.34-rc5-4b402210-rcu/arch/x86/kvm/svm.c --- linux-2.6.34-rc5/arch/x86/kvm/svm.c 2010-04-23 10:39:30.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/arch/x86/kvm/svm.c 2010-04-23 10:42:23.000000000 -0700 @@ -706,29 +706,28 @@ static struct kvm_vcpu *svm_create_vcpu( if (err) goto free_svm; + err = -ENOMEM; page = alloc_page(GFP_KERNEL); - if (!page) { - err = -ENOMEM; + if (!page) goto uninit; - } - err = -ENOMEM; msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); if (!msrpm_pages) - goto uninit; + goto free_page1; nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); if (!nested_msrpm_pages) - goto uninit; - - svm->msrpm = page_address(msrpm_pages); - svm_vcpu_init_msrpm(svm->msrpm); + goto free_page2; hsave_page = alloc_page(GFP_KERNEL); if (!hsave_page) - goto uninit; + goto free_page3; + svm->nested.hsave = page_address(hsave_page); + svm->msrpm = page_address(msrpm_pages); + svm_vcpu_init_msrpm(svm->msrpm); + svm->nested.msrpm = page_address(nested_msrpm_pages); svm->vmcb = page_address(page); @@ -744,6 +743,12 @@ static struct kvm_vcpu *svm_create_vcpu( return &svm->vcpu; +free_page3: + 
__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER); +free_page2: + __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER); +free_page1: + __free_page(page); uninit: kvm_vcpu_uninit(&svm->vcpu); free_svm: diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/x86/kvm/vmx.c linux-2.6.34-rc5-4b402210-rcu/arch/x86/kvm/vmx.c --- linux-2.6.34-rc5/arch/x86/kvm/vmx.c 2010-04-23 10:39:30.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/arch/x86/kvm/vmx.c 2010-04-23 10:42:23.000000000 -0700 @@ -77,6 +77,8 @@ module_param(emulate_invalid_guest_state #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) +#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM)) + /* * These 2 parameters are used to config the controls for Pause-Loop Exiting: * ple_gap: upper bound on the amount of time between two successive @@ -131,7 +133,7 @@ struct vcpu_vmx { } host_state; struct { int vm86_active; - u8 save_iopl; + ulong save_rflags; struct kvm_save_segment { u16 selector; unsigned long base; @@ -818,18 +820,23 @@ static void vmx_fpu_deactivate(struct kv static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) { - unsigned long rflags; + unsigned long rflags, save_rflags; rflags = vmcs_readl(GUEST_RFLAGS); - if (to_vmx(vcpu)->rmode.vm86_active) - rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM); + if (to_vmx(vcpu)->rmode.vm86_active) { + rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS; + save_rflags = to_vmx(vcpu)->rmode.save_rflags; + rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; + } return rflags; } static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { - if (to_vmx(vcpu)->rmode.vm86_active) + if (to_vmx(vcpu)->rmode.vm86_active) { + to_vmx(vcpu)->rmode.save_rflags = rflags; rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; + } vmcs_writel(GUEST_RFLAGS, rflags); } @@ -1483,8 +1490,8 @@ static void enter_pmode(struct kvm_vcpu vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar); flags = vmcs_readl(GUEST_RFLAGS); - flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM); - flags |= (vmx->rmode.save_iopl << IOPL_SHIFT); + flags &= RMODE_GUEST_OWNED_EFLAGS_BITS; + flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; vmcs_writel(GUEST_RFLAGS, flags); vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | @@ -1557,8 +1564,7 @@ static void enter_rmode(struct kvm_vcpu vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); flags = vmcs_readl(GUEST_RFLAGS); - vmx->rmode.save_iopl - = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; + vmx->rmode.save_rflags = flags; flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; diff -urpNa -X dontdiff linux-2.6.34-rc5/arch/x86/kvm/x86.c linux-2.6.34-rc5-4b402210-rcu/arch/x86/kvm/x86.c --- linux-2.6.34-rc5/arch/x86/kvm/x86.c 2010-04-23 10:39:30.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/arch/x86/kvm/x86.c 2010-04-23 10:42:23.000000000 -0700 @@ -433,8 +433,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, #ifdef CONFIG_X86_64 if (cr0 & 0xffffffff00000000UL) { - printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n", - cr0, kvm_read_cr0(vcpu)); kvm_inject_gp(vcpu, 0); return; } @@ -443,14 +441,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, cr0 &= ~CR0_RESERVED_BITS; if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) { - printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n"); kvm_inject_gp(vcpu, 0); return; } if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) { - printk(KERN_DEBUG "set_cr0: #GP, set PG flag " - "and a clear PE flag\n"); kvm_inject_gp(vcpu, 0); return; } @@ -461,15 +456,11 @@ void 
kvm_set_cr0(struct kvm_vcpu *vcpu, int cs_db, cs_l; if (!is_pae(vcpu)) { - printk(KERN_DEBUG "set_cr0: #GP, start paging " - "in long mode while PAE is disabled\n"); kvm_inject_gp(vcpu, 0); return; } kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); if (cs_l) { - printk(KERN_DEBUG "set_cr0: #GP, start paging " - "in long mode while CS.L == 1\n"); kvm_inject_gp(vcpu, 0); return; @@ -477,8 +468,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, } else #endif if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) { - printk(KERN_DEBUG "set_cr0: #GP, pdptrs " - "reserved bits\n"); kvm_inject_gp(vcpu, 0); return; } @@ -505,28 +494,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE; if (cr4 & CR4_RESERVED_BITS) { - printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n"); kvm_inject_gp(vcpu, 0); return; } if (is_long_mode(vcpu)) { if (!(cr4 & X86_CR4_PAE)) { - printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while " - "in long mode\n"); kvm_inject_gp(vcpu, 0); return; } } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) && ((cr4 ^ old_cr4) & pdptr_bits) && !load_pdptrs(vcpu, vcpu->arch.cr3)) { - printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n"); kvm_inject_gp(vcpu, 0); return; } if (cr4 & X86_CR4_VMXE) { - printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n"); kvm_inject_gp(vcpu, 0); return; } @@ -547,21 +531,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, if (is_long_mode(vcpu)) { if (cr3 & CR3_L_MODE_RESERVED_BITS) { - printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n"); kvm_inject_gp(vcpu, 0); return; } } else { if (is_pae(vcpu)) { if (cr3 & CR3_PAE_RESERVED_BITS) { - printk(KERN_DEBUG - "set_cr3: #GP, reserved bits\n"); kvm_inject_gp(vcpu, 0); return; } if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) { - printk(KERN_DEBUG "set_cr3: #GP, pdptrs " - "reserved bits\n"); kvm_inject_gp(vcpu, 0); return; } @@ -593,7 +572,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3); void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) { if (cr8 & CR8_RESERVED_BITS) { - printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8); kvm_inject_gp(vcpu, 0); return; } @@ -649,15 +627,12 @@ static u32 emulated_msrs[] = { static void set_efer(struct kvm_vcpu *vcpu, u64 efer) { if (efer & efer_reserved_bits) { - printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n", - efer); kvm_inject_gp(vcpu, 0); return; } if (is_paging(vcpu) && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) { - printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n"); kvm_inject_gp(vcpu, 0); return; } @@ -667,7 +642,6 @@ static void set_efer(struct kvm_vcpu *vc feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) { - printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n"); kvm_inject_gp(vcpu, 0); return; } @@ -678,7 +652,6 @@ static void set_efer(struct kvm_vcpu *vc feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) { - printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n"); kvm_inject_gp(vcpu, 0); return; } @@ -967,9 +940,13 @@ static int set_msr_mce(struct kvm_vcpu * if (msr >= MSR_IA32_MC0_CTL && msr < MSR_IA32_MC0_CTL + 4 * bank_num) { u32 offset = msr - MSR_IA32_MC0_CTL; - /* only 0 or all 1s can be written to IA32_MCi_CTL */ + /* only 0 or all 1s can be written to IA32_MCi_CTL + * some Linux kernels though clear bit 10 in bank 4 to + * workaround a BIOS/GART TBL issue on AMD K8s, ignore + * this to avoid an uncatched #GP in the guest + */ if ((offset & 0x3) == 0 && - 
data != 0 && data != ~(u64)0) + data != 0 && (data | (1 << 10)) != ~(u64)0) return -1; vcpu->arch.mce_banks[offset] = data; break; @@ -2635,8 +2612,9 @@ static int kvm_vm_ioctl_reinject(struct int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { - int r, n, i; + int r, i; struct kvm_memory_slot *memslot; + unsigned long n; unsigned long is_dirty = 0; unsigned long *dirty_bitmap = NULL; @@ -2651,7 +2629,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kv if (!memslot->dirty_bitmap) goto out; - n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; + n = kvm_dirty_bitmap_bytes(memslot); r = -ENOMEM; dirty_bitmap = vmalloc(n); @@ -4483,7 +4461,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v kvm_set_cr8(vcpu, kvm_run->cr8); if (vcpu->arch.pio.cur_count) { + vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); r = complete_pio(vcpu); + srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); if (r) goto out; } @@ -5146,6 +5126,7 @@ int kvm_task_switch(struct kvm_vcpu *vcp int ret = 0; u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR); u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR); + u32 desc_limit; old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL); @@ -5168,7 +5149,10 @@ int kvm_task_switch(struct kvm_vcpu *vcp } } - if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) { + desc_limit = get_desc_limit(&nseg_desc); + if (!nseg_desc.p || + ((desc_limit < 0x67 && (nseg_desc.type & 8)) || + desc_limit < 0x2b)) { kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc); return 1; } diff -urpNa -X dontdiff linux-2.6.34-rc5/Documentation/HOWTO linux-2.6.34-rc5-4b402210-rcu/Documentation/HOWTO --- linux-2.6.34-rc5/Documentation/HOWTO 2010-04-23 10:39:17.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/Documentation/HOWTO 2010-04-23 10:42:23.000000000 -0700 @@ -234,7 +234,7 @@ process is as follows: Linus, usually the patches that have already been included in the -next kernel for a few weeks. The preferred way to submit big changes is using git (the kernel's source management tool, more information - can be found at http://git.or.cz/) but plain patches are also just + can be found at http://git-scm.com/) but plain patches are also just fine. - After two weeks a -rc1 kernel is released it is now possible to push only patches that do not include new features that could affect the diff -urpNa -X dontdiff linux-2.6.34-rc5/Documentation/kernel-parameters.txt linux-2.6.34-rc5-4b402210-rcu/Documentation/kernel-parameters.txt --- linux-2.6.34-rc5/Documentation/kernel-parameters.txt 2010-04-23 10:39:17.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/Documentation/kernel-parameters.txt 2010-04-23 10:42:23.000000000 -0700 @@ -1194,7 +1194,7 @@ and is between 256 and 4096 characters. libata.force= [LIBATA] Force configurations. The format is comma separated list of "[ID:]VAL" where ID is - PORT[:DEVICE]. PORT and DEVICE are decimal numbers + PORT[.DEVICE]. PORT and DEVICE are decimal numbers matching port, link or device. Basically, it matches the ATA ID string printed on console by libata. 
If the whole ID part is omitted, the last PORT and DEVICE diff -urpNa -X dontdiff linux-2.6.34-rc5/Documentation/RCU/stallwarn.txt linux-2.6.34-rc5-4b402210-rcu/Documentation/RCU/stallwarn.txt --- linux-2.6.34-rc5/Documentation/RCU/stallwarn.txt 2010-04-23 10:39:17.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/Documentation/RCU/stallwarn.txt 2010-05-08 08:02:22.000000000 -0700 @@ -3,35 +3,79 @@ Using RCU's CPU Stall Detector The CONFIG_RCU_CPU_STALL_DETECTOR kernel config parameter enables RCU's CPU stall detector, which detects conditions that unduly delay RCU grace periods. The stall detector's idea of what constitutes -"unduly delayed" is controlled by a pair of C preprocessor macros: +"unduly delayed" is controlled by a set of C preprocessor macros: RCU_SECONDS_TILL_STALL_CHECK This macro defines the period of time that RCU will wait from the beginning of a grace period until it issues an RCU CPU - stall warning. It is normally ten seconds. + stall warning. This time period is normally ten seconds. RCU_SECONDS_TILL_STALL_RECHECK This macro defines the period of time that RCU will wait after - issuing a stall warning until it issues another stall warning. - It is normally set to thirty seconds. + issuing a stall warning until it issues another stall warning + for the same stall. This time period is normally set to thirty + seconds. RCU_STALL_RAT_DELAY - The CPU stall detector tries to make the offending CPU rat on itself, - as this often gives better-quality stack traces. However, if - the offending CPU does not detect its own stall in the number - of jiffies specified by RCU_STALL_RAT_DELAY, then other CPUs will - complain. This is normally set to two jiffies. - -The following problems can result in an RCU CPU stall warning: + The CPU stall detector tries to make the offending CPU print its + own warnings, as this often gives better-quality stack traces. + However, if the offending CPU does not detect its own stall in + the number of jiffies specified by RCU_STALL_RAT_DELAY, then + some other CPU will complain. This delay is normally set to + two jiffies. + +When a CPU detects that it is stalling, it will print a message similar +to the following: + +INFO: rcu_sched_state detected stall on CPU 5 (t=2500 jiffies) + +This message indicates that CPU 5 detected that it was causing a stall, +and that the stall was affecting RCU-sched. This message will normally be +followed by a stack dump of the offending CPU. On TREE_RCU kernel builds, +RCU and RCU-sched are implemented by the same underlying mechanism, +while on TREE_PREEMPT_RCU kernel builds, RCU is instead implemented +by rcu_preempt_state. + +On the other hand, if the offending CPU fails to print out a stall-warning +message quickly enough, some other CPU will print a message similar to +the following: + +INFO: rcu_bh_state detected stalls on CPUs/tasks: { 3 5 } (detected by 2, 2502 jiffies) + +This message indicates that CPU 2 detected that CPUs 3 and 5 were both +causing stalls, and that the stall was affecting RCU-bh. This message +will normally be followed by stack dumps for each CPU. Please note that +TREE_PREEMPT_RCU builds can be stalled by tasks as well as by CPUs, +and that the tasks will be indicated by PID, for example, "P3421". +It is even possible for a rcu_preempt_state stall to be caused by both +CPUs -and- tasks, in which case the offending CPUs and tasks will all +be called out in the list. 
+ +Finally, if the grace period ends just as the stall warning starts +printing, there will be a spurious stall-warning message: + +INFO: rcu_bh_state detected stalls on CPUs/tasks: { } (detected by 4, 2502 jiffies) + +This is rare, but does happen from time to time in real life. + +So your kernel printed an RCU CPU stall warning. The next question is +"What caused it?" The following problems can result in RCU CPU stall +warnings: o A CPU looping in an RCU read-side critical section. -o A CPU looping with interrupts disabled. +o A CPU looping with interrupts disabled. This condition can + result in RCU-sched and RCU-bh stalls. + +o A CPU looping with preemption disabled. This condition can + result in RCU-sched stalls and, if ksoftirqd is in use, RCU-bh + stalls. -o A CPU looping with preemption disabled. +o A CPU looping with bottom halves disabled. This condition can + result in RCU-sched and RCU-bh stalls. o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel without invoking schedule(). @@ -39,20 +83,24 @@ o For !CONFIG_PREEMPT kernels, a CPU loo o A bug in the RCU implementation. o A hardware failure. This is quite unlikely, but has occurred - at least once in a former life. A CPU failed in a running system, + at least once in real life. A CPU failed in a running system, becoming unresponsive, but not causing an immediate crash. This resulted in a series of RCU CPU stall warnings, eventually leading the realization that the CPU had failed. -The RCU, RCU-sched, and RCU-bh implementations have CPU stall warning. -SRCU does not do so directly, but its calls to synchronize_sched() will -result in RCU-sched detecting any CPU stalls that might be occurring. - -To diagnose the cause of the stall, inspect the stack traces. The offending -function will usually be near the top of the stack. If you have a series -of stall warnings from a single extended stall, comparing the stack traces -can often help determine where the stall is occurring, which will usually -be in the function nearest the top of the stack that stays the same from -trace to trace. +The RCU, RCU-sched, and RCU-bh implementations have CPU stall +warning. SRCU does not have its own CPU stall warnings, but its +calls to synchronize_sched() will result in RCU-sched detecting +RCU-sched-related CPU stalls. Please note that RCU only detects +CPU stalls when there is a grace period in progress. No grace period, +no CPU stall warnings. + +To diagnose the cause of the stall, inspect the stack traces. +The offending function will usually be near the top of the stack. +If you have a series of stall warnings from a single extended stall, +comparing the stack traces can often help determine where the stall +is occurring, which will usually be in the function nearest the top of +that portion of the stack which remains the same from trace to trace. +If you can reliably trigger the stall, ftrace can be quite helpful. RCU bugs can often be debugged with the help of CONFIG_RCU_TRACE. 
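[Not part of the patch above — illustrative sketch only.] The stallwarn.txt text just added describes the conditions that trigger an RCU CPU stall warning (for example, a CPU looping with preemption disabled inside an RCU read-side critical section for longer than RCU_SECONDS_TILL_STALL_CHECK). A minimal, hypothetical test module (name rcu_stall_demo.c is made up here) that provokes exactly that warning on a CONFIG_RCU_CPU_STALL_DETECTOR kernel could look like this; the stalling CPU should then print the "INFO: rcu_sched_state detected stall on CPU ..." message itself, as documented above:

/*
 * rcu_stall_demo.c -- illustrative only, NOT part of this patch.
 * Spins for ~15s (> RCU_SECONDS_TILL_STALL_CHECK, normally 10s) inside
 * an RCU read-side critical section with preemption disabled, so the
 * offending CPU reports its own RCU-sched stall.  It will also trip the
 * soft-lockup watchdog; that is expected for this demonstration.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>

static int __init rcu_stall_demo_init(void)
{
        pr_info("rcu_stall_demo: spinning ~15s in an RCU read-side section\n");
        preempt_disable();
        rcu_read_lock();
        mdelay(15 * 1000);              /* long enough to exceed the stall check */
        rcu_read_unlock();
        preempt_enable();
        return 0;
}

static void __exit rcu_stall_demo_exit(void)
{
}

module_init(rcu_stall_demo_init);
module_exit(rcu_stall_demo_exit);
MODULE_LICENSE("GPL");
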
diff -urpNa -X dontdiff linux-2.6.34-rc5/Documentation/RCU/trace.txt linux-2.6.34-rc5-4b402210-rcu/Documentation/RCU/trace.txt --- linux-2.6.34-rc5/Documentation/RCU/trace.txt 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/Documentation/RCU/trace.txt 2010-05-08 08:02:22.000000000 -0700 @@ -256,23 +256,23 @@ o Each element of the form "1/1 0:127 ^0 The output of "cat rcu/rcu_pending" looks as follows: rcu_sched: - 0 np=255892 qsp=53936 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741 - 1 np=261224 qsp=54638 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792 - 2 np=237496 qsp=49664 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629 - 3 np=236249 qsp=48766 cbr=0 cng=286 gpc=48049 gps=1218 nf=207 nn=137723 - 4 np=221310 qsp=46850 cbr=0 cng=26 gpc=43161 gps=4634 nf=3529 nn=123110 - 5 np=237332 qsp=48449 cbr=0 cng=54 gpc=47920 gps=3252 nf=201 nn=137456 - 6 np=219995 qsp=46718 cbr=0 cng=50 gpc=42098 gps=6093 nf=4202 nn=120834 - 7 np=249893 qsp=49390 cbr=0 cng=72 gpc=38400 gps=17102 nf=41 nn=144888 + 0 np=255892 qsp=53936 rpq=85 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741 + 1 np=261224 qsp=54638 rpq=33 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792 + 2 np=237496 qsp=49664 rpq=23 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629 + 3 np=236249 qsp=48766 rpq=98 cbr=0 cng=286 gpc=48049 gps=1218 nf=207 nn=137723 + 4 np=221310 qsp=46850 rpq=7 cbr=0 cng=26 gpc=43161 gps=4634 nf=3529 nn=123110 + 5 np=237332 qsp=48449 rpq=9 cbr=0 cng=54 gpc=47920 gps=3252 nf=201 nn=137456 + 6 np=219995 qsp=46718 rpq=12 cbr=0 cng=50 gpc=42098 gps=6093 nf=4202 nn=120834 + 7 np=249893 qsp=49390 rpq=42 cbr=0 cng=72 gpc=38400 gps=17102 nf=41 nn=144888 rcu_bh: - 0 np=146741 qsp=1419 cbr=0 cng=6 gpc=0 gps=0 nf=2 nn=145314 - 1 np=155792 qsp=12597 cbr=0 cng=0 gpc=4 gps=8 nf=3 nn=143180 - 2 np=136629 qsp=18680 cbr=0 cng=0 gpc=7 gps=6 nf=0 nn=117936 - 3 np=137723 qsp=2843 cbr=0 cng=0 gpc=10 gps=7 nf=0 nn=134863 - 4 np=123110 qsp=12433 cbr=0 cng=0 gpc=4 gps=2 nf=0 nn=110671 - 5 np=137456 qsp=4210 cbr=0 cng=0 gpc=6 gps=5 nf=0 nn=133235 - 6 np=120834 qsp=9902 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921 - 7 np=144888 qsp=26336 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542 + 0 np=146741 qsp=1419 rpq=6 cbr=0 cng=6 gpc=0 gps=0 nf=2 nn=145314 + 1 np=155792 qsp=12597 rpq=3 cbr=0 cng=0 gpc=4 gps=8 nf=3 nn=143180 + 2 np=136629 qsp=18680 rpq=1 cbr=0 cng=0 gpc=7 gps=6 nf=0 nn=117936 + 3 np=137723 qsp=2843 rpq=0 cbr=0 cng=0 gpc=10 gps=7 nf=0 nn=134863 + 4 np=123110 qsp=12433 rpq=0 cbr=0 cng=0 gpc=4 gps=2 nf=0 nn=110671 + 5 np=137456 qsp=4210 rpq=1 cbr=0 cng=0 gpc=6 gps=5 nf=0 nn=133235 + 6 np=120834 qsp=9902 rpq=2 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921 + 7 np=144888 qsp=26336 rpq=0 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542 As always, this is once again split into "rcu_sched" and "rcu_bh" portions, with CONFIG_TREE_PREEMPT_RCU kernels having an additional @@ -284,6 +284,9 @@ o "np" is the number of times that __rcu o "qsp" is the number of times that the RCU was waiting for a quiescent state from this CPU. +o "rpq" is the number of times that the CPU had passed through + a quiescent state, but not yet reported it to RCU. + o "cbr" is the number of times that this CPU had RCU callbacks that had passed through a grace period, and were thus ready to be invoked. 
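[Not part of the patch above — illustrative sketch only.] The trace.txt change documents the new "rpq" counter (quiescent states passed through but not yet reported to RCU) in the per-CPU rcu/rcu_pending output. As a hedged example, a small user-space program (hypothetical file rpq_dump.c) could pull that field out of the debugfs file, assuming debugfs is mounted at /sys/kernel/debug and the kernel was built with CONFIG_RCU_TRACE:

/* rpq_dump.c -- illustrative only, NOT part of this patch. */
#include <stdio.h>

int main(void)
{
        FILE *fp = fopen("/sys/kernel/debug/rcu/rcu_pending", "r");
        char line[512];
        unsigned long cpu, np, qsp, rpq;

        if (!fp) {
                perror("rcu_pending");
                return 1;
        }
        while (fgets(line, sizeof(line), fp)) {
                /* Per-CPU lines look like: "0 np=255892 qsp=53936 rpq=85 cbr=0 ..." */
                if (sscanf(line, " %lu np=%lu qsp=%lu rpq=%lu",
                           &cpu, &np, &qsp, &rpq) == 4)
                        printf("cpu %lu: unreported quiescent states (rpq) = %lu\n",
                               cpu, rpq);
        }
        fclose(fp);
        return 0;
}
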
diff -urpNa -X dontdiff linux-2.6.34-rc5/Documentation/stable_kernel_rules.txt linux-2.6.34-rc5-4b402210-rcu/Documentation/stable_kernel_rules.txt --- linux-2.6.34-rc5/Documentation/stable_kernel_rules.txt 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/Documentation/stable_kernel_rules.txt 2010-04-23 10:42:23.000000000 -0700 @@ -18,16 +18,15 @@ Rules on what kind of patches are accept - It cannot contain any "trivial" fixes in it (spelling changes, whitespace cleanups, etc). - It must follow the Documentation/SubmittingPatches rules. - - It or an equivalent fix must already exist in Linus' tree. Quote the - respective commit ID in Linus' tree in your patch submission to -stable. + - It or an equivalent fix must already exist in Linus' tree (upstream). Procedure for submitting patches to the -stable tree: - Send the patch, after verifying that it follows the above rules, to - stable@kernel.org. - - To have the patch automatically included in the stable tree, add the - the tag + stable@kernel.org. You must note the upstream commit ID in the changelog + of your submission. + - To have the patch automatically included in the stable tree, add the tag Cc: stable@kernel.org in the sign-off area. Once the patch is merged it will be applied to the stable tree without anything else needing to be done by the author diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/ata/libata-eh.c linux-2.6.34-rc5-4b402210-rcu/drivers/ata/libata-eh.c --- linux-2.6.34-rc5/drivers/ata/libata-eh.c 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/ata/libata-eh.c 2010-04-23 10:42:23.000000000 -0700 @@ -879,6 +879,8 @@ static void ata_eh_set_pending(struct at void ata_qc_schedule_eh(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; + struct request_queue *q = qc->scsicmd->device->request_queue; + unsigned long flags; WARN_ON(!ap->ops->error_handler); @@ -890,7 +892,9 @@ void ata_qc_schedule_eh(struct ata_queue * Note that ATA_QCFLAG_FAILED is unconditionally set after * this function completes. 
*/ + spin_lock_irqsave(q->queue_lock, flags); blk_abort_request(qc->scsicmd->request); + spin_unlock_irqrestore(q->queue_lock, flags); } /** @@ -1624,6 +1628,7 @@ void ata_eh_analyze_ncq_error(struct ata } /* okay, this error is ours */ + memset(&tf, 0, sizeof(tf)); rc = ata_eh_read_log_10h(dev, &tag, &tf); if (rc) { ata_link_printk(link, KERN_ERR, "failed to read log page 10h " diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/ata/pata_pcmcia.c linux-2.6.34-rc5-4b402210-rcu/drivers/ata/pata_pcmcia.c --- linux-2.6.34-rc5/drivers/ata/pata_pcmcia.c 2010-04-23 10:39:32.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/ata/pata_pcmcia.c 2010-04-23 10:42:23.000000000 -0700 @@ -424,6 +424,8 @@ static struct pcmcia_device_id pcmcia_de PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), + PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x3e520e17), + PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10), PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), @@ -444,6 +446,8 @@ static struct pcmcia_device_id pcmcia_de PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), + PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x9351e59d), + PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47), PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/char/pcmcia/cm4000_cs.c linux-2.6.34-rc5-4b402210-rcu/drivers/char/pcmcia/cm4000_cs.c --- linux-2.6.34-rc5/drivers/char/pcmcia/cm4000_cs.c 2010-04-23 10:39:34.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/char/pcmcia/cm4000_cs.c 2010-04-23 10:42:23.000000000 -0700 @@ -1026,14 +1026,16 @@ static ssize_t cmm_read(struct file *fil xoutb(0, REG_FLAGS1(iobase)); /* clear detectCMM */ /* last check before exit */ - if (!io_detect_cm4000(iobase, dev)) - count = -ENODEV; + if (!io_detect_cm4000(iobase, dev)) { + rc = -ENODEV; + goto release_io; + } if (test_bit(IS_INVREV, &dev->flags) && count > 0) str_invert_revert(dev->rbuf, count); if (copy_to_user(buf, dev->rbuf, count)) - return -EFAULT; + rc = -EFAULT; release_io: clear_bit(LOCK_IO, &dev->flags); diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/firewire/core-iso.c linux-2.6.34-rc5-4b402210-rcu/drivers/firewire/core-iso.c --- linux-2.6.34-rc5/drivers/firewire/core-iso.c 2010-04-23 10:39:34.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/firewire/core-iso.c 2010-04-23 10:42:23.000000000 -0700 @@ -190,7 +190,7 @@ static int manage_bandwidth(struct fw_ca for (try = 0; try < 5; try++) { new = allocate ? 
old - bandwidth : old + bandwidth; if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL) - break; + return -EBUSY; data[0] = cpu_to_be32(old); data[1] = cpu_to_be32(new); @@ -218,7 +218,7 @@ static int manage_channel(struct fw_card u32 channels_mask, u64 offset, bool allocate, __be32 data[2]) { __be32 c, all, old; - int i, retry = 5; + int i, ret = -EIO, retry = 5; old = all = allocate ? cpu_to_be32(~0) : 0; @@ -226,6 +226,8 @@ static int manage_channel(struct fw_card if (!(channels_mask & 1 << i)) continue; + ret = -EBUSY; + c = cpu_to_be32(1 << (31 - i)); if ((old & c) != (all & c)) continue; @@ -251,12 +253,16 @@ static int manage_channel(struct fw_card /* 1394-1995 IRM, fall through to retry. */ default: - if (retry--) + if (retry) { + retry--; i--; + } else { + ret = -EIO; + } } } - return -EIO; + return ret; } static void deallocate_channel(struct fw_card *card, int irm_id, diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/firewire/ohci.c linux-2.6.34-rc5-4b402210-rcu/drivers/firewire/ohci.c --- linux-2.6.34-rc5/drivers/firewire/ohci.c 2010-04-23 10:39:34.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/firewire/ohci.c 2010-04-23 10:42:23.000000000 -0700 @@ -1158,7 +1158,7 @@ static void handle_local_lock(struct fw_ struct fw_packet *packet, u32 csr) { struct fw_packet response; - int tcode, length, ext_tcode, sel; + int tcode, length, ext_tcode, sel, try; __be32 *payload, lock_old; u32 lock_arg, lock_data; @@ -1185,21 +1185,26 @@ static void handle_local_lock(struct fw_ reg_write(ohci, OHCI1394_CSRCompareData, lock_arg); reg_write(ohci, OHCI1394_CSRControl, sel); - if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) - lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData)); - else - fw_notify("swap not done yet\n"); + for (try = 0; try < 20; try++) + if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) { + lock_old = cpu_to_be32(reg_read(ohci, + OHCI1394_CSRData)); + fw_fill_response(&response, packet->header, + RCODE_COMPLETE, + &lock_old, sizeof(lock_old)); + goto out; + } + + fw_error("swap not done (CSR lock timeout)\n"); + fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); - fw_fill_response(&response, packet->header, - RCODE_COMPLETE, &lock_old, sizeof(lock_old)); out: fw_core_handle_response(&ohci->card, &response); } static void handle_local_request(struct context *ctx, struct fw_packet *packet) { - u64 offset; - u32 csr; + u64 offset, csr; if (ctx == &ctx->ohci->at_request_ctx) { packet->ack = ACK_PENDING; diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/gpu/drm/drm_stub.c linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/drm_stub.c --- linux-2.6.34-rc5/drivers/gpu/drm/drm_stub.c 2010-04-23 10:39:35.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/drm_stub.c 2010-04-23 10:42:23.000000000 -0700 @@ -516,8 +516,6 @@ void drm_put_dev(struct drm_device *dev) } driver = dev->driver; - drm_vblank_cleanup(dev); - drm_lastclose(dev); if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && @@ -537,6 +535,8 @@ void drm_put_dev(struct drm_device *dev) dev->agp = NULL; } + drm_vblank_cleanup(dev); + list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) drm_rmmap(dev, r_list->map); drm_ht_remove(&dev->map_hash); diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/gpu/drm/i915/i915_dma.c linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/i915/i915_dma.c --- linux-2.6.34-rc5/drivers/gpu/drm/i915/i915_dma.c 2010-04-23 10:39:35.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/i915/i915_dma.c 2010-04-23 10:42:23.000000000 -0700 @@ -1357,6 
+1357,8 @@ static void i915_setup_compression(struc dev_priv->cfb_size = size; + dev_priv->compressed_fb = compressed_fb; + if (IS_GM45(dev)) { g4x_disable_fbc(dev); I915_WRITE(DPFC_CB_BASE, compressed_fb->start); @@ -1364,12 +1366,22 @@ static void i915_setup_compression(struc i8xx_disable_fbc(dev); I915_WRITE(FBC_CFB_BASE, cfb_base); I915_WRITE(FBC_LL_BASE, ll_base); + dev_priv->compressed_llb = compressed_llb; } DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, ll_base, size >> 20); } +static void i915_cleanup_compression(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + drm_mm_put_block(dev_priv->compressed_fb); + if (!IS_GM45(dev)) + drm_mm_put_block(dev_priv->compressed_llb); +} + /* true = enable decode, false = disable decoder */ static unsigned int i915_vga_set_decode(void *cookie, bool state) { @@ -1787,6 +1799,8 @@ int i915_driver_unload(struct drm_device mutex_lock(&dev->struct_mutex); i915_gem_cleanup_ringbuffer(dev); mutex_unlock(&dev->struct_mutex); + if (I915_HAS_FBC(dev) && i915_powersave) + i915_cleanup_compression(dev); drm_mm_takedown(&dev_priv->vram); i915_gem_lastclose(dev); diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/gpu/drm/i915/i915_drv.c linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/i915/i915_drv.c --- linux-2.6.34-rc5/drivers/gpu/drm/i915/i915_drv.c 2010-04-23 10:39:35.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/i915/i915_drv.c 2010-04-23 10:42:23.000000000 -0700 @@ -69,7 +69,8 @@ const static struct intel_device_info in }; const static struct intel_device_info intel_i85x_info = { - .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, + .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, + .cursor_needs_physical = 1, }; const static struct intel_device_info intel_i865g_info = { @@ -151,7 +152,7 @@ const static struct pci_device_id pciidl INTEL_VGA_DEVICE(0x3577, &intel_i830_info), INTEL_VGA_DEVICE(0x2562, &intel_845g_info), INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), - INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info), + INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/gpu/drm/i915/i915_drv.h linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/i915/i915_drv.h --- linux-2.6.34-rc5/drivers/gpu/drm/i915/i915_drv.h 2010-04-23 10:39:35.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/i915/i915_drv.h 2010-04-23 10:42:23.000000000 -0700 @@ -195,6 +195,7 @@ struct intel_overlay; struct intel_device_info { u8 is_mobile : 1; u8 is_i8xx : 1; + u8 is_i85x : 1; u8 is_i915g : 1; u8 is_i9xx : 1; u8 is_i945gm : 1; @@ -235,11 +236,14 @@ typedef struct drm_i915_private { drm_dma_handle_t *status_page_dmah; void *hw_status_page; + void *seqno_page; dma_addr_t dma_status_page; uint32_t counter; unsigned int status_gfx_addr; + unsigned int seqno_gfx_addr; drm_local_map_t hws_map; struct drm_gem_object *hws_obj; + struct drm_gem_object *seqno_obj; struct drm_gem_object *pwrctx; struct resource mch_res; @@ -630,6 +634,9 @@ typedef struct drm_i915_private { u8 max_delay; enum no_fbc_reason no_fbc_reason; + + struct drm_mm_node *compressed_fb; + struct drm_mm_node *compressed_llb; } drm_i915_private_t; /** driver private structure attached to each drm_gem_object */ @@ -1070,7 +1077,7 @@ extern int i915_wait_ring(struct drm_dev #define IS_I830(dev) ((dev)->pci_device == 0x3577) #define IS_845G(dev) ((dev)->pci_device == 0x2562) 
-#define IS_I85X(dev) ((dev)->pci_device == 0x3582) +#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) #define IS_I865G(dev) ((dev)->pci_device == 0x2572) #define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx) #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) @@ -1135,6 +1142,7 @@ extern int i915_wait_ring(struct drm_dev #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ IS_GEN6(dev)) +#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev)) #define PRIMARY_RINGBUFFER_SIZE (128*1024) diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/gpu/drm/i915/i915_gem.c linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/i915/i915_gem.c --- linux-2.6.34-rc5/drivers/gpu/drm/i915/i915_gem.c 2010-04-23 10:39:35.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/i915/i915_gem.c 2010-04-23 10:42:23.000000000 -0700 @@ -1588,6 +1588,13 @@ i915_gem_process_flushing_list(struct dr } } +#define PIPE_CONTROL_FLUSH(addr) \ + OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ + PIPE_CONTROL_DEPTH_STALL); \ + OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ + OUT_RING(0); \ + OUT_RING(0); \ + /** * Creates a new sequence number, emitting a write of it to the status page * plus an interrupt, which will trigger i915_user_interrupt_handler. @@ -1622,13 +1629,47 @@ i915_add_request(struct drm_device *dev, if (dev_priv->mm.next_gem_seqno == 0) dev_priv->mm.next_gem_seqno++; - BEGIN_LP_RING(4); - OUT_RING(MI_STORE_DWORD_INDEX); - OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(seqno); + if (HAS_PIPE_CONTROL(dev)) { + u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; - OUT_RING(MI_USER_INTERRUPT); - ADVANCE_LP_RING(); + /* + * Workaround qword write incoherence by flushing the + * PIPE_NOTIFY buffers out to memory before requesting + * an interrupt. + */ + BEGIN_LP_RING(32); + OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); + OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + OUT_RING(seqno); + OUT_RING(0); + PIPE_CONTROL_FLUSH(scratch_addr); + scratch_addr += 128; /* write to separate cachelines */ + PIPE_CONTROL_FLUSH(scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(scratch_addr); + OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | + PIPE_CONTROL_NOTIFY); + OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + OUT_RING(seqno); + OUT_RING(0); + ADVANCE_LP_RING(); + } else { + BEGIN_LP_RING(4); + OUT_RING(MI_STORE_DWORD_INDEX); + OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + OUT_RING(seqno); + + OUT_RING(MI_USER_INTERRUPT); + ADVANCE_LP_RING(); + } DRM_DEBUG_DRIVER("%d\n", seqno); @@ -1752,7 +1793,10 @@ i915_get_gem_seqno(struct drm_device *de { drm_i915_private_t *dev_priv = dev->dev_private; - return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); + if (IS_I965G(dev)) + return ((volatile u32 *)(dev_priv->seqno_page))[0]; + else + return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); } /** @@ -2362,6 +2406,12 @@ static void i915_write_fence_reg(struct pitch_val = obj_priv->stride / tile_width; pitch_val = ffs(pitch_val) - 1; + if (obj_priv->tiling_mode == I915_TILING_Y && + HAS_128_BYTE_Y_TILING(dev)) + WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); + else + WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); + val = obj_priv->gtt_offset; if (obj_priv->tiling_mode == 
I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; @@ -4546,6 +4596,49 @@ i915_gem_idle(struct drm_device *dev) return 0; } +/* + * 965+ support PIPE_CONTROL commands, which provide finer grained control + * over cache flushing. + */ +static int +i915_gem_init_pipe_control(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + int ret; + + obj = drm_gem_object_alloc(dev, 4096); + if (obj == NULL) { + DRM_ERROR("Failed to allocate seqno page\n"); + ret = -ENOMEM; + goto err; + } + obj_priv = to_intel_bo(obj); + obj_priv->agp_type = AGP_USER_CACHED_MEMORY; + + ret = i915_gem_object_pin(obj, 4096); + if (ret) + goto err_unref; + + dev_priv->seqno_gfx_addr = obj_priv->gtt_offset; + dev_priv->seqno_page = kmap(obj_priv->pages[0]); + if (dev_priv->seqno_page == NULL) + goto err_unpin; + + dev_priv->seqno_obj = obj; + memset(dev_priv->seqno_page, 0, PAGE_SIZE); + + return 0; + +err_unpin: + i915_gem_object_unpin(obj); +err_unref: + drm_gem_object_unreference(obj); +err: + return ret; +} + static int i915_gem_init_hws(struct drm_device *dev) { @@ -4563,7 +4656,8 @@ i915_gem_init_hws(struct drm_device *dev obj = drm_gem_object_alloc(dev, 4096); if (obj == NULL) { DRM_ERROR("Failed to allocate status page\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err; } obj_priv = to_intel_bo(obj); obj_priv->agp_type = AGP_USER_CACHED_MEMORY; @@ -4571,7 +4665,7 @@ i915_gem_init_hws(struct drm_device *dev ret = i915_gem_object_pin(obj, 4096); if (ret != 0) { drm_gem_object_unreference(obj); - return ret; + goto err_unref; } dev_priv->status_gfx_addr = obj_priv->gtt_offset; @@ -4580,10 +4674,16 @@ i915_gem_init_hws(struct drm_device *dev if (dev_priv->hw_status_page == NULL) { DRM_ERROR("Failed to map status page.\n"); memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); - i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); - return -EINVAL; + ret = -EINVAL; + goto err_unpin; } + + if (HAS_PIPE_CONTROL(dev)) { + ret = i915_gem_init_pipe_control(dev); + if (ret) + goto err_unpin; + } + dev_priv->hws_obj = obj; memset(dev_priv->hw_status_page, 0, PAGE_SIZE); if (IS_GEN6(dev)) { @@ -4596,6 +4696,30 @@ i915_gem_init_hws(struct drm_device *dev DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); return 0; + +err_unpin: + i915_gem_object_unpin(obj); +err_unref: + drm_gem_object_unreference(obj); +err: + return 0; +} + +static void +i915_gem_cleanup_pipe_control(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + + obj = dev_priv->seqno_obj; + obj_priv = to_intel_bo(obj); + kunmap(obj_priv->pages[0]); + i915_gem_object_unpin(obj); + drm_gem_object_unreference(obj); + dev_priv->seqno_obj = NULL; + + dev_priv->seqno_page = NULL; } static void @@ -4619,6 +4743,9 @@ i915_gem_cleanup_hws(struct drm_device * memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); dev_priv->hw_status_page = NULL; + if (HAS_PIPE_CONTROL(dev)) + i915_gem_cleanup_pipe_control(dev); + /* Write high address into HWS_PGA when disabling. 
*/ I915_WRITE(HWS_PGA, 0x1ffff000); } diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/gpu/drm/i915/i915_gem_tiling.c linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/i915/i915_gem_tiling.c --- linux-2.6.34-rc5/drivers/gpu/drm/i915/i915_gem_tiling.c 2010-04-23 10:39:35.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/i915/i915_gem_tiling.c 2010-04-23 10:42:23.000000000 -0700 @@ -202,21 +202,17 @@ i915_tiling_ok(struct drm_device *dev, i * reg, so dont bother to check the size */ if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) return false; - } else if (IS_I9XX(dev)) { - uint32_t pitch_val = ffs(stride / tile_width) - 1; - - /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB) - * instead of 4 (2KB) on 945s. - */ - if (pitch_val > I915_FENCE_MAX_PITCH_VAL || - size > (I830_FENCE_MAX_SIZE_VAL << 20)) + } else if (IS_GEN3(dev) || IS_GEN2(dev)) { + if (stride > 8192) return false; - } else { - uint32_t pitch_val = ffs(stride / tile_width) - 1; - if (pitch_val > I830_FENCE_MAX_PITCH_VAL || - size > (I830_FENCE_MAX_SIZE_VAL << 19)) - return false; + if (IS_GEN3(dev)) { + if (size > I830_FENCE_MAX_SIZE_VAL << 20) + return false; + } else { + if (size > I830_FENCE_MAX_SIZE_VAL << 19) + return false; + } } /* 965+ just needs multiples of tile width */ diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/gpu/drm/i915/i915_irq.c linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/i915/i915_irq.c --- linux-2.6.34-rc5/drivers/gpu/drm/i915/i915_irq.c 2010-04-23 10:39:35.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/i915/i915_irq.c 2010-04-23 10:42:23.000000000 -0700 @@ -349,7 +349,7 @@ irqreturn_t ironlake_irq_handler(struct READ_BREADCRUMB(dev_priv); } - if (gt_iir & GT_USER_INTERRUPT) { + if (gt_iir & GT_PIPE_NOTIFY) { u32 seqno = i915_get_gem_seqno(dev); dev_priv->mm.irq_gem_seqno = seqno; trace_i915_gem_request_complete(dev, seqno); @@ -1005,7 +1005,7 @@ void i915_user_irq_get(struct drm_device spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { if (HAS_PCH_SPLIT(dev)) - ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); + ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); else i915_enable_irq(dev_priv, I915_USER_INTERRUPT); } @@ -1021,7 +1021,7 @@ void i915_user_irq_put(struct drm_device BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { if (HAS_PCH_SPLIT(dev)) - ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); + ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); else i915_disable_irq(dev_priv, I915_USER_INTERRUPT); } @@ -1305,7 +1305,7 @@ static int ironlake_irq_postinstall(stru /* enable kind of interrupts always enabled */ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; - u32 render_mask = GT_USER_INTERRUPT; + u32 render_mask = GT_PIPE_NOTIFY; u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/gpu/drm/i915/i915_opregion.c linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/i915/i915_opregion.c --- linux-2.6.34-rc5/drivers/gpu/drm/i915/i915_opregion.c 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/i915/i915_opregion.c 2010-04-23 10:42:23.000000000 -0700 @@ -382,8 +382,57 @@ static void intel_didl_outputs(struct dr struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = 
&dev_priv->opregion; struct drm_connector *connector; + acpi_handle handle; + struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; + unsigned long long device_id; + acpi_status status; int i = 0; + handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); + if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) + return; + + if (acpi_is_video_device(acpi_dev)) + acpi_video_bus = acpi_dev; + else { + list_for_each_entry(acpi_cdev, &acpi_dev->children, node) { + if (acpi_is_video_device(acpi_cdev)) { + acpi_video_bus = acpi_cdev; + break; + } + } + } + + if (!acpi_video_bus) { + printk(KERN_WARNING "No ACPI video bus found\n"); + return; + } + + list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { + if (i >= 8) { + dev_printk (KERN_ERR, &dev->pdev->dev, + "More than 8 outputs detected\n"); + return; + } + status = + acpi_evaluate_integer(acpi_cdev->handle, "_ADR", + NULL, &device_id); + if (ACPI_SUCCESS(status)) { + if (!device_id) + goto blind_set; + opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f); + i++; + } + } + +end: + /* If fewer than 8 outputs, the list must be null terminated */ + if (i < 8) + opregion->acpi->didl[i] = 0; + return; + +blind_set: + i = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { int output_type = ACPI_OTHER_OUTPUT; if (i >= 8) { @@ -416,10 +465,7 @@ static void intel_didl_outputs(struct dr opregion->acpi->didl[i] |= (1<<31) | output_type | i; i++; } - - /* If fewer than 8 outputs, the list must be null terminated */ - if (i < 8) - opregion->acpi->didl[i] = 0; + goto end; } int intel_opregion_init(struct drm_device *dev, int resume) diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/gpu/drm/i915/i915_reg.h linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/i915/i915_reg.h --- linux-2.6.34-rc5/drivers/gpu/drm/i915/i915_reg.h 2010-04-23 10:39:35.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/i915/i915_reg.h 2010-04-23 10:42:23.000000000 -0700 @@ -230,6 +230,16 @@ #define ASYNC_FLIP (1<<22) #define DISPLAY_PLANE_A (0<<20) #define DISPLAY_PLANE_B (1<<20) +#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2) +#define PIPE_CONTROL_QW_WRITE (1<<14) +#define PIPE_CONTROL_DEPTH_STALL (1<<13) +#define PIPE_CONTROL_WC_FLUSH (1<<12) +#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */ +#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */ +#define PIPE_CONTROL_ISP_DIS (1<<9) +#define PIPE_CONTROL_NOTIFY (1<<8) +#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ +#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */ /* * Fence registers @@ -241,7 +251,7 @@ #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) #define I830_FENCE_PITCH_SHIFT 4 #define I830_FENCE_REG_VALID (1<<0) -#define I915_FENCE_MAX_PITCH_VAL 0x10 +#define I915_FENCE_MAX_PITCH_VAL 4 #define I830_FENCE_MAX_PITCH_VAL 6 #define I830_FENCE_MAX_SIZE_VAL (1<<8) @@ -2285,6 +2295,7 @@ #define DEIER 0x4400c /* GT interrupt */ +#define GT_PIPE_NOTIFY (1 << 4) #define GT_SYNC_STATUS (1 << 2) #define GT_USER_INTERRUPT (1 << 0) diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/gpu/drm/i915/intel_display.c linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/i915/intel_display.c --- linux-2.6.34-rc5/drivers/gpu/drm/i915/intel_display.c 2010-04-23 10:39:35.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/gpu/drm/i915/intel_display.c 2010-04-23 10:42:23.000000000 -0700 @@ -4853,17 +4853,18 @@ static void intel_init_display(struct dr dev_priv->display.update_wm = g4x_update_wm; else if 
(IS_I965G(dev)) dev_priv->display.update_wm = i965_update_wm; - else if (IS_I9XX(dev) || IS_MOBILE(dev)) { + else if (IS_I9XX(dev)) { dev_priv->display.update_wm = i9xx_update_wm; dev_priv->display.get_fifo_size = i9xx_get_fifo_size; + } else if (IS_I85X(dev)) { + dev_priv->display.update_wm = i9xx_update_wm; + dev_priv->display.get_fifo_size = i85x_get_fifo_size; } else { - if (IS_I85X(dev)) - dev_priv->display.get_fifo_size = i85x_get_fifo_size; - else if (IS_845G(dev)) + dev_priv->display.update_wm = i830_update_wm; + if (IS_845G(dev)) dev_priv->display.get_fifo_size = i845_get_fifo_size; else dev_priv->display.get_fifo_size = i830_get_fifo_size; - dev_priv->display.update_wm = i830_update_wm; } } diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/hwmon/asus_atk0110.c linux-2.6.34-rc5-4b402210-rcu/drivers/hwmon/asus_atk0110.c --- linux-2.6.34-rc5/drivers/hwmon/asus_atk0110.c 2010-04-23 10:39:36.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/hwmon/asus_atk0110.c 2010-04-23 10:42:23.000000000 -0700 @@ -1169,15 +1169,19 @@ static int atk_create_files(struct atk_d int err; list_for_each_entry(s, &data->sensor_list, list) { + sysfs_attr_init(&s->input_attr.attr); err = device_create_file(data->hwmon_dev, &s->input_attr); if (err) return err; + sysfs_attr_init(&s->label_attr.attr); err = device_create_file(data->hwmon_dev, &s->label_attr); if (err) return err; + sysfs_attr_init(&s->limit1_attr.attr); err = device_create_file(data->hwmon_dev, &s->limit1_attr); if (err) return err; + sysfs_attr_init(&s->limit2_attr.attr); err = device_create_file(data->hwmon_dev, &s->limit2_attr); if (err) return err; diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/i2c/busses/i2c-imx.c linux-2.6.34-rc5-4b402210-rcu/drivers/i2c/busses/i2c-imx.c --- linux-2.6.34-rc5/drivers/i2c/busses/i2c-imx.c 2010-04-23 10:39:37.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/i2c/busses/i2c-imx.c 2010-04-23 10:42:23.000000000 -0700 @@ -146,10 +146,10 @@ static int i2c_imx_bus_busy(struct imx_i "<%s> I2C Interrupted\n", __func__); return -EINTR; } - if (time_after(jiffies, orig_jiffies + HZ / 1000)) { + if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) { dev_dbg(&i2c_imx->adapter.dev, "<%s> I2C bus is busy\n", __func__); - return -EIO; + return -ETIMEDOUT; } schedule(); } @@ -444,6 +444,8 @@ static int i2c_imx_xfer(struct i2c_adapt result = i2c_imx_read(i2c_imx, &msgs[i]); else result = i2c_imx_write(i2c_imx, &msgs[i]); + if (result) + goto fail0; } fail0: diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/i2c/busses/i2c-omap.c linux-2.6.34-rc5-4b402210-rcu/drivers/i2c/busses/i2c-omap.c --- linux-2.6.34-rc5/drivers/i2c/busses/i2c-omap.c 2010-04-23 10:39:37.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/i2c/busses/i2c-omap.c 2010-04-23 10:42:23.000000000 -0700 @@ -903,6 +903,11 @@ omap_i2c_probe(struct platform_device *p platform_set_drvdata(pdev, dev); + if (cpu_is_omap7xx()) + dev->reg_shift = 1; + else + dev->reg_shift = 2; + if ((r = omap_i2c_get_clocks(dev)) != 0) goto err_iounmap; @@ -926,11 +931,6 @@ omap_i2c_probe(struct platform_device *p dev->b_hw = 1; /* Enable hardware fixes */ } - if (cpu_is_omap7xx()) - dev->reg_shift = 1; - else - dev->reg_shift = 2; - /* reset ASAP, clearing any IRQs */ omap_i2c_init(dev); diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/i2c/busses/i2c-pnx.c linux-2.6.34-rc5-4b402210-rcu/drivers/i2c/busses/i2c-pnx.c --- linux-2.6.34-rc5/drivers/i2c/busses/i2c-pnx.c 2010-04-23 10:39:37.000000000 -0700 +++ 
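The sysfs_attr_init() calls added in the asus_atk0110 hunk follow the usual rule for dynamically created attributes: initialize the attribute (with lockdep enabled this assigns it a lock class key) before registering it; statically declared attributes get this from the DEVICE_ATTR() family of macros. Minimal sketch of the pattern as used above:

/* dynamically allocated sysfs attribute: init first, then register */
sysfs_attr_init(&s->input_attr.attr);
err = device_create_file(data->hwmon_dev, &s->input_attr);
if (err)
	return err;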
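The i2c-imx timeout change above is worth spelling out: with integer division, HZ / 1000 is 0 on kernels configured with HZ < 1000, so the old busy check could expire after at most one tick; msecs_to_jiffies() gives an HZ-independent budget, and -ETIMEDOUT is the conventional return for it. Sketch of the resulting wait loop, where busy_condition() is a hypothetical stand-in for the bus-busy test:

unsigned long timeout = jiffies + msecs_to_jiffies(500);

while (busy_condition()) {		/* hypothetical bus-busy predicate */
	if (time_after(jiffies, timeout))
		return -ETIMEDOUT;	/* give up after ~500 ms, any HZ */
	schedule();
}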
linux-2.6.34-rc5-4b402210-rcu/drivers/i2c/busses/i2c-pnx.c 2010-04-23 10:42:23.000000000 -0700 @@ -173,6 +173,9 @@ static int i2c_pnx_master_xmit(struct i2 /* We still have something to talk about... */ val = *alg_data->mif.buf++; + if (alg_data->mif.len == 1) + val |= stop_bit; + alg_data->mif.len--; iowrite32(val, I2C_REG_TX(alg_data)); @@ -246,6 +249,9 @@ static int i2c_pnx_master_rcv(struct i2c __func__); if (alg_data->mif.len == 1) { + /* Last byte, do not acknowledge next rcv. */ + val |= stop_bit; + /* * Enable interrupt RFDAIE (data in Rx fifo), * and disable DRMIE (need data for Tx) @@ -633,6 +639,8 @@ static int __devinit i2c_pnx_probe(struc */ tmp = ((freq / 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2; + if (tmp > 0x3FF) + tmp = 0x3FF; iowrite32(tmp, I2C_REG_CKH(alg_data)); iowrite32(tmp, I2C_REG_CKL(alg_data)); diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/i2c/busses/i2c-stu300.c linux-2.6.34-rc5-4b402210-rcu/drivers/i2c/busses/i2c-stu300.c --- linux-2.6.34-rc5/drivers/i2c/busses/i2c-stu300.c 2010-04-23 10:39:37.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/i2c/busses/i2c-stu300.c 2010-04-23 10:42:23.000000000 -0700 @@ -498,7 +498,7 @@ static int stu300_set_clk(struct stu300_ int i = 0; /* Locate the apropriate clock setting */ - while (i < ARRAY_SIZE(stu300_clktable) && + while (i < ARRAY_SIZE(stu300_clktable) - 1 && stu300_clktable[i].rate < clkrate) i++; diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/ide/ide-cs.c linux-2.6.34-rc5-4b402210-rcu/drivers/ide/ide-cs.c --- linux-2.6.34-rc5/drivers/ide/ide-cs.c 2010-04-23 10:39:37.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/ide/ide-cs.c 2010-04-23 10:42:23.000000000 -0700 @@ -409,6 +409,8 @@ static struct pcmcia_device_id ide_ids[] PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420), PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), + PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x3e520e17), + PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10), PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e), PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), @@ -429,6 +431,8 @@ static struct pcmcia_device_id ide_ids[] PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), + PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x9351e59d), + PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47), PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918), PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/md/raid5.c linux-2.6.34-rc5-4b402210-rcu/drivers/md/raid5.c --- linux-2.6.34-rc5/drivers/md/raid5.c 2010-04-23 10:39:40.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/md/raid5.c 2010-04-23 10:42:23.000000000 -0700 @@ -1650,8 +1650,8 @@ static sector_t raid5_compute_sector(rai int previous, int *dd_idx, struct stripe_head *sh) { - long stripe; - unsigned long chunk_number; + sector_t stripe, stripe2; + sector_t chunk_number; unsigned int chunk_offset; int pd_idx, qd_idx; int ddf_layout = 0; @@ -1671,18 +1671,13 @@ static sector_t 
raid5_compute_sector(rai */ chunk_offset = sector_div(r_sector, sectors_per_chunk); chunk_number = r_sector; - BUG_ON(r_sector != chunk_number); /* * Compute the stripe number */ - stripe = chunk_number / data_disks; - - /* - * Compute the data disk and parity disk indexes inside the stripe - */ - *dd_idx = chunk_number % data_disks; - + stripe = chunk_number; + *dd_idx = sector_div(stripe, data_disks); + stripe2 = stripe; /* * Select the parity disk based on the user selected algorithm. */ @@ -1694,21 +1689,21 @@ static sector_t raid5_compute_sector(rai case 5: switch (algorithm) { case ALGORITHM_LEFT_ASYMMETRIC: - pd_idx = data_disks - stripe % raid_disks; + pd_idx = data_disks - sector_div(stripe2, raid_disks); if (*dd_idx >= pd_idx) (*dd_idx)++; break; case ALGORITHM_RIGHT_ASYMMETRIC: - pd_idx = stripe % raid_disks; + pd_idx = sector_div(stripe2, raid_disks); if (*dd_idx >= pd_idx) (*dd_idx)++; break; case ALGORITHM_LEFT_SYMMETRIC: - pd_idx = data_disks - stripe % raid_disks; + pd_idx = data_disks - sector_div(stripe2, raid_disks); *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; break; case ALGORITHM_RIGHT_SYMMETRIC: - pd_idx = stripe % raid_disks; + pd_idx = sector_div(stripe2, raid_disks); *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; break; case ALGORITHM_PARITY_0: @@ -1728,7 +1723,7 @@ static sector_t raid5_compute_sector(rai switch (algorithm) { case ALGORITHM_LEFT_ASYMMETRIC: - pd_idx = raid_disks - 1 - (stripe % raid_disks); + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); qd_idx = pd_idx + 1; if (pd_idx == raid_disks-1) { (*dd_idx)++; /* Q D D D P */ @@ -1737,7 +1732,7 @@ static sector_t raid5_compute_sector(rai (*dd_idx) += 2; /* D D P Q D */ break; case ALGORITHM_RIGHT_ASYMMETRIC: - pd_idx = stripe % raid_disks; + pd_idx = sector_div(stripe2, raid_disks); qd_idx = pd_idx + 1; if (pd_idx == raid_disks-1) { (*dd_idx)++; /* Q D D D P */ @@ -1746,12 +1741,12 @@ static sector_t raid5_compute_sector(rai (*dd_idx) += 2; /* D D P Q D */ break; case ALGORITHM_LEFT_SYMMETRIC: - pd_idx = raid_disks - 1 - (stripe % raid_disks); + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); qd_idx = (pd_idx + 1) % raid_disks; *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; break; case ALGORITHM_RIGHT_SYMMETRIC: - pd_idx = stripe % raid_disks; + pd_idx = sector_div(stripe2, raid_disks); qd_idx = (pd_idx + 1) % raid_disks; *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; break; @@ -1770,7 +1765,7 @@ static sector_t raid5_compute_sector(rai /* Exactly the same as RIGHT_ASYMMETRIC, but or * of blocks for computing Q is different. 
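The stripe2 scratch variable introduced above exists because sector_div() modifies its first argument: it divides a 64-bit sector_t in place and returns the remainder, which is how the code avoids a direct 64-bit '%' that 32-bit kernels cannot emit. Minimal sketch of the idiom:

sector_t stripe2 = stripe;			/* scratch copy */
int pd = sector_div(stripe2, raid_disks);	/* pd = stripe % raid_disks */
/* stripe2 now holds stripe / raid_disks */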
*/ - pd_idx = stripe % raid_disks; + pd_idx = sector_div(stripe2, raid_disks); qd_idx = pd_idx + 1; if (pd_idx == raid_disks-1) { (*dd_idx)++; /* Q D D D P */ @@ -1785,7 +1780,8 @@ static sector_t raid5_compute_sector(rai * D D D P Q rather than * Q D D D P */ - pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks); + stripe2 += 1; + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); qd_idx = pd_idx + 1; if (pd_idx == raid_disks-1) { (*dd_idx)++; /* Q D D D P */ @@ -1797,7 +1793,7 @@ static sector_t raid5_compute_sector(rai case ALGORITHM_ROTATING_N_CONTINUE: /* Same as left_symmetric but Q is before P */ - pd_idx = raid_disks - 1 - (stripe % raid_disks); + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); qd_idx = (pd_idx + raid_disks - 1) % raid_disks; *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; ddf_layout = 1; @@ -1805,27 +1801,27 @@ static sector_t raid5_compute_sector(rai case ALGORITHM_LEFT_ASYMMETRIC_6: /* RAID5 left_asymmetric, with Q on last device */ - pd_idx = data_disks - stripe % (raid_disks-1); + pd_idx = data_disks - sector_div(stripe2, raid_disks-1); if (*dd_idx >= pd_idx) (*dd_idx)++; qd_idx = raid_disks - 1; break; case ALGORITHM_RIGHT_ASYMMETRIC_6: - pd_idx = stripe % (raid_disks-1); + pd_idx = sector_div(stripe2, raid_disks-1); if (*dd_idx >= pd_idx) (*dd_idx)++; qd_idx = raid_disks - 1; break; case ALGORITHM_LEFT_SYMMETRIC_6: - pd_idx = data_disks - stripe % (raid_disks-1); + pd_idx = data_disks - sector_div(stripe2, raid_disks-1); *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); qd_idx = raid_disks - 1; break; case ALGORITHM_RIGHT_SYMMETRIC_6: - pd_idx = stripe % (raid_disks-1); + pd_idx = sector_div(stripe2, raid_disks-1); *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); qd_idx = raid_disks - 1; break; @@ -1870,14 +1866,14 @@ static sector_t compute_blocknr(struct s : conf->algorithm; sector_t stripe; int chunk_offset; - int chunk_number, dummy1, dd_idx = i; + sector_t chunk_number; + int dummy1, dd_idx = i; sector_t r_sector; struct stripe_head sh2; chunk_offset = sector_div(new_sector, sectors_per_chunk); stripe = new_sector; - BUG_ON(new_sector != stripe); if (i == sh->pd_idx) return 0; @@ -1970,7 +1966,7 @@ static sector_t compute_blocknr(struct s } chunk_number = stripe * data_disks + i; - r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; + r_sector = chunk_number * sectors_per_chunk + chunk_offset; check = raid5_compute_sector(conf, r_sector, previous, &dummy1, &sh2); diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/pcmcia/cistpl.c linux-2.6.34-rc5-4b402210-rcu/drivers/pcmcia/cistpl.c --- linux-2.6.34-rc5/drivers/pcmcia/cistpl.c 2010-04-23 10:39:53.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/pcmcia/cistpl.c 2010-04-23 10:42:23.000000000 -0700 @@ -1484,6 +1484,11 @@ int pccard_validate_cis(struct pcmcia_so if (!s) return -EINVAL; + if (s->functions) { + WARN_ON(1); + return -EINVAL; + } + /* We do not want to validate the CIS cache... 
*/ mutex_lock(&s->ops_mutex); destroy_cis_cache(s); @@ -1639,7 +1644,7 @@ static ssize_t pccard_show_cis(struct ko count = 0; else { struct pcmcia_socket *s; - unsigned int chains; + unsigned int chains = 1; if (off + count > size) count = size - off; @@ -1648,7 +1653,7 @@ static ssize_t pccard_show_cis(struct ko if (!(s->state & SOCKET_PRESENT)) return -ENODEV; - if (pccard_validate_cis(s, &chains)) + if (!s->functions && pccard_validate_cis(s, &chains)) return -EIO; if (!chains) return -ENODATA; diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/pcmcia/db1xxx_ss.c linux-2.6.34-rc5-4b402210-rcu/drivers/pcmcia/db1xxx_ss.c --- linux-2.6.34-rc5/drivers/pcmcia/db1xxx_ss.c 2010-04-23 10:39:53.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/pcmcia/db1xxx_ss.c 2010-04-23 10:42:23.000000000 -0700 @@ -166,8 +166,10 @@ static int db1x_pcmcia_setup_irqs(struct ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq, IRQF_DISABLED, "pcmcia_insert", sock); - if (ret) + if (ret) { + local_irq_restore(flags); goto out1; + } ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq, IRQF_DISABLED, "pcmcia_eject", sock); diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/pcmcia/ds.c linux-2.6.34-rc5-4b402210-rcu/drivers/pcmcia/ds.c --- linux-2.6.34-rc5/drivers/pcmcia/ds.c 2010-04-23 10:39:53.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/pcmcia/ds.c 2010-04-23 10:42:24.000000000 -0700 @@ -687,12 +687,10 @@ static void pcmcia_requery(struct pcmcia new_funcs = mfc.nfn; else new_funcs = 1; - if (old_funcs > new_funcs) { + if (old_funcs != new_funcs) { + /* we need to re-start */ pcmcia_card_remove(s, NULL); pcmcia_card_add(s); - } else if (new_funcs > old_funcs) { - s->functions = new_funcs; - pcmcia_device_add(s, 1); } } @@ -728,6 +726,8 @@ static int pcmcia_load_firmware(struct p struct pcmcia_socket *s = dev->socket; const struct firmware *fw; int ret = -ENOMEM; + cistpl_longlink_mfc_t mfc; + int old_funcs, new_funcs = 1; if (!filename) return -EINVAL; @@ -750,6 +750,14 @@ static int pcmcia_load_firmware(struct p goto release; } + /* we need to re-start if the number of functions changed */ + old_funcs = s->functions; + if (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_LONGLINK_MFC, + &mfc)) + new_funcs = mfc.nfn; + + if (old_funcs != new_funcs) + ret = -EBUSY; /* update information */ pcmcia_device_query(dev); @@ -858,10 +866,8 @@ static inline int pcmcia_devmatch(struct if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) { dev_dbg(&dev->dev, "device needs a fake CIS\n"); if (!dev->socket->fake_cis) - pcmcia_load_firmware(dev, did->cisfile); - - if (!dev->socket->fake_cis) - return 0; + if (pcmcia_load_firmware(dev, did->cisfile)) + return 0; } if (did->match_flags & PCMCIA_DEV_ID_MATCH_ANONYMOUS) { diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/pcmcia/pcmcia_resource.c linux-2.6.34-rc5-4b402210-rcu/drivers/pcmcia/pcmcia_resource.c --- linux-2.6.34-rc5/drivers/pcmcia/pcmcia_resource.c 2010-04-23 10:39:53.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/pcmcia/pcmcia_resource.c 2010-04-23 10:42:24.000000000 -0700 @@ -755,12 +755,12 @@ int pcmcia_request_irq(struct pcmcia_dev else printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n"); -#ifdef CONFIG_PCMCIA_PROBE - - if (s->irq.AssignedIRQ != 0) { - /* If the interrupt is already assigned, it must be the same */ + /* If the interrupt is already assigned, it must be the same */ + if (s->irq.AssignedIRQ != 0) irq = s->irq.AssignedIRQ; - } else { + +#ifdef CONFIG_PCMCIA_PROBE + if (!irq) { int try; u32 mask = 
s->irq_mask; void *data = p_dev; /* something unique to this device */ diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/pcmcia/rsrc_nonstatic.c linux-2.6.34-rc5-4b402210-rcu/drivers/pcmcia/rsrc_nonstatic.c --- linux-2.6.34-rc5/drivers/pcmcia/rsrc_nonstatic.c 2010-04-23 10:39:53.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/pcmcia/rsrc_nonstatic.c 2010-04-23 10:42:24.000000000 -0700 @@ -214,7 +214,7 @@ static void do_io_probe(struct pcmcia_so return; } for (i = base, most = 0; i < base+num; i += 8) { - res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); + res = claim_region(s, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); if (!res) continue; hole = inb(i); @@ -231,9 +231,14 @@ static void do_io_probe(struct pcmcia_so bad = any = 0; for (i = base; i < base+num; i += 8) { - res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); - if (!res) + res = claim_region(s, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); + if (!res) { + if (!any) + printk(" excluding"); + if (!bad) + bad = any = i; continue; + } for (j = 0; j < 8; j++) if (inb(i+j) != most) break; @@ -253,6 +258,7 @@ static void do_io_probe(struct pcmcia_so } if (bad) { if ((num > 16) && (bad == base) && (i == base+num)) { + sub_interval(&s_data->io_db, bad, i-bad); printk(" nothing: probe failed.\n"); return; } else { @@ -804,7 +810,7 @@ static int adjust_memory(struct pcmcia_s static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long start, unsigned long end) { struct socket_data *data = s->resource_data; - unsigned long size = end - start + 1; + unsigned long size; int ret = 0; #if defined(CONFIG_X86) @@ -814,6 +820,8 @@ static int adjust_io(struct pcmcia_socke start = 0x100; #endif + size = end - start + 1; + if (end < start) return -EINVAL; diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/regulator/mc13783-regulator.c linux-2.6.34-rc5-4b402210-rcu/drivers/regulator/mc13783-regulator.c --- linux-2.6.34-rc5/drivers/regulator/mc13783-regulator.c 2010-04-23 10:39:54.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/regulator/mc13783-regulator.c 2010-04-23 10:42:24.000000000 -0700 @@ -618,9 +618,12 @@ static int __devexit mc13783_regulator_r dev_get_platdata(&pdev->dev); int i; + platform_set_drvdata(pdev, NULL); + for (i = 0; i < pdata->num_regulators; i++) regulator_unregister(priv->regulators[i]); + kfree(priv); return 0; } diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/s390/block/dasd_3990_erp.c linux-2.6.34-rc5-4b402210-rcu/drivers/s390/block/dasd_3990_erp.c --- linux-2.6.34-rc5/drivers/s390/block/dasd_3990_erp.c 2010-04-23 10:39:54.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/s390/block/dasd_3990_erp.c 2010-04-23 10:42:24.000000000 -0700 @@ -2309,7 +2309,7 @@ static struct dasd_ccw_req *dasd_3990_er cqr->retries); dasd_block_set_timer(device->block, (HZ << 3)); } - return cqr; + return erp; } ccw = cqr->cpaddr; @@ -2372,6 +2372,9 @@ dasd_3990_erp_additional_erp(struct dasd /* add erp and initialize with default TIC */ erp = dasd_3990_erp_add_erp(cqr); + if (IS_ERR(erp)) + return erp; + /* inspect sense, determine specific ERP if possible */ if (erp != cqr) { @@ -2711,6 +2714,8 @@ dasd_3990_erp_action(struct dasd_ccw_req if (erp == NULL) { /* no matching erp found - set up erp */ erp = dasd_3990_erp_additional_erp(cqr); + if (IS_ERR(erp)) + return erp; } else { /* matching erp found - set all leading erp's to DONE */ erp = dasd_3990_erp_handle_match_erp(cqr, erp); diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/s390/block/dasd.c 
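The IS_ERR() checks added to the dasd ERP paths above use the kernel's error-pointer convention: a pointer-returning function may return ERR_PTR(-errno) on failure, callers detect that with IS_ERR() and, if needed, extract the code with PTR_ERR(). Sketch of the caller side as used above:

struct dasd_ccw_req *erp;

erp = dasd_3990_erp_add_erp(cqr);
if (IS_ERR(erp))
	return erp;		/* propagate the encoded -errno unchanged */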
linux-2.6.34-rc5-4b402210-rcu/drivers/s390/block/dasd.c --- linux-2.6.34-rc5/drivers/s390/block/dasd.c 2010-04-23 10:39:54.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/s390/block/dasd.c 2010-04-23 10:42:24.000000000 -0700 @@ -1899,7 +1899,8 @@ restart: /* Process requests that may be recovered */ if (cqr->status == DASD_CQR_NEED_ERP) { erp_fn = base->discipline->erp_action(cqr); - erp_fn(cqr); + if (IS_ERR(erp_fn(cqr))) + continue; goto restart; } diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/s390/char/zcore.c linux-2.6.34-rc5-4b402210-rcu/drivers/s390/char/zcore.c --- linux-2.6.34-rc5/drivers/s390/char/zcore.c 2010-04-23 10:39:54.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/s390/char/zcore.c 2010-04-23 10:42:24.000000000 -0700 @@ -638,11 +638,7 @@ static int __init zcore_reipl_init(void) rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE); else rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE); - if (rc) { - free_page((unsigned long) ipl_block); - return rc; - } - if (csum_partial(ipl_block, ipl_block->hdr.len, 0) != + if (rc || csum_partial(ipl_block, ipl_block->hdr.len, 0) != ipib_info.checksum) { TRACE("Checksum does not match\n"); free_page((unsigned long) ipl_block); diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/s390/cio/chsc.c linux-2.6.34-rc5-4b402210-rcu/drivers/s390/cio/chsc.c --- linux-2.6.34-rc5/drivers/s390/cio/chsc.c 2010-04-23 10:39:54.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/s390/cio/chsc.c 2010-04-23 10:42:24.000000000 -0700 @@ -29,6 +29,7 @@ #include "chsc.h" static void *sei_page; +static DEFINE_SPINLOCK(sda_lock); /** * chsc_error_from_response() - convert a chsc response to an error @@ -832,11 +833,10 @@ void __init chsc_free_sei_area(void) kfree(sei_page); } -int __init -chsc_enable_facility(int operation_code) +int chsc_enable_facility(int operation_code) { int ret; - struct { + static struct { struct chsc_header request; u8 reserved1:4; u8 format:4; @@ -849,33 +849,32 @@ chsc_enable_facility(int operation_code) u32 reserved5:4; u32 format2:4; u32 reserved6:24; - } __attribute__ ((packed)) *sda_area; + } __attribute__ ((packed, aligned(4096))) sda_area; - sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); - if (!sda_area) - return -ENOMEM; - sda_area->request.length = 0x0400; - sda_area->request.code = 0x0031; - sda_area->operation_code = operation_code; + spin_lock(&sda_lock); + memset(&sda_area, 0, sizeof(sda_area)); + sda_area.request.length = 0x0400; + sda_area.request.code = 0x0031; + sda_area.operation_code = operation_code; - ret = chsc(sda_area); + ret = chsc(&sda_area); if (ret > 0) { ret = (ret == 3) ? 
-ENODEV : -EBUSY; goto out; } - switch (sda_area->response.code) { + switch (sda_area.response.code) { case 0x0101: ret = -EOPNOTSUPP; break; default: - ret = chsc_error_from_response(sda_area->response.code); + ret = chsc_error_from_response(sda_area.response.code); } if (ret != 0) CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", - operation_code, sda_area->response.code); + operation_code, sda_area.response.code); out: - free_page((unsigned long)sda_area); + spin_unlock(&sda_lock); return ret; } diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/s390/cio/chsc_sch.c linux-2.6.34-rc5-4b402210-rcu/drivers/s390/cio/chsc_sch.c --- linux-2.6.34-rc5/drivers/s390/cio/chsc_sch.c 2010-04-23 10:39:54.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/s390/cio/chsc_sch.c 2010-04-23 10:42:24.000000000 -0700 @@ -124,7 +124,7 @@ static int chsc_subchannel_prepare(struc * since we don't have a way to clear the subchannel and * cannot disable it with a request running. */ - cc = stsch(sch->schid, &schib); + cc = stsch_err(sch->schid, &schib); if (!cc && scsw_stctl(&schib.scsw)) return -EAGAIN; return 0; diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/s390/cio/cio.c linux-2.6.34-rc5-4b402210-rcu/drivers/s390/cio/cio.c --- linux-2.6.34-rc5/drivers/s390/cio/cio.c 2010-04-23 10:39:54.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/s390/cio/cio.c 2010-04-23 10:42:24.000000000 -0700 @@ -361,7 +361,7 @@ int cio_commit_config(struct subchannel struct schib schib; int ccode, retry, ret = 0; - if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) + if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) return -ENODEV; for (retry = 0; retry < 5; retry++) { @@ -372,7 +372,7 @@ int cio_commit_config(struct subchannel return ccode; switch (ccode) { case 0: /* successful */ - if (stsch(sch->schid, &schib) || + if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) return -ENODEV; if (cio_check_config(sch, &schib)) { @@ -404,7 +404,7 @@ int cio_update_schib(struct subchannel * { struct schib schib; - if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) + if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib)) return -ENODEV; memcpy(&sch->schib, &schib, sizeof(schib)); @@ -771,7 +771,7 @@ cio_get_console_sch_no(void) if (console_irq != -1) { /* VM provided us with the irq number of the console. */ schid.sch_no = console_irq; - if (stsch(schid, &console_subchannel.schib) != 0 || + if (stsch_err(schid, &console_subchannel.schib) != 0 || (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !console_subchannel.schib.pmcw.dnv) return -1; @@ -863,10 +863,10 @@ __disable_subchannel_easy(struct subchan cc = 0; for (retry=0;retry<3;retry++) { schib->pmcw.ena = 0; - cc = msch(schid, schib); + cc = msch_err(schid, schib); if (cc) return (cc==3?-ENODEV:-EBUSY); - if (stsch(schid, schib) || !css_sch_is_valid(schib)) + if (stsch_err(schid, schib) || !css_sch_is_valid(schib)) return -ENODEV; if (!schib->pmcw.ena) return 0; @@ -913,7 +913,7 @@ static int stsch_reset(struct subchannel pgm_check_occured = 0; s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; - rc = stsch(schid, addr); + rc = stsch_err(schid, addr); s390_base_pgm_handler_fn = NULL; /* The program check handler could have changed pgm_check_occured. 
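The chsc_enable_facility() rework above swaps a per-call get_zeroed_page() for one static request block, page-aligned via aligned(4096) and serialized by sda_lock, so the function can also run late (it loses its __init marking) without having to allocate memory. Sketch of the pattern, with a hypothetical struct name standing in for the anonymous struct used above:

static DEFINE_SPINLOCK(sda_lock);
static struct sda_request sda_area	/* 'struct sda_request' is hypothetical */
	__attribute__ ((packed, aligned(4096)));

spin_lock(&sda_lock);
memset(&sda_area, 0, sizeof(sda_area));
/* ... fill in request.length/code/operation_code, issue chsc(&sda_area) ... */
spin_unlock(&sda_lock);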
*/ @@ -950,7 +950,7 @@ static int __shutdown_subchannel_easy(st /* No default clear strategy */ break; } - stsch(schid, &schib); + stsch_err(schid, &schib); __disable_subchannel_easy(schid, &schib); } out: @@ -1086,7 +1086,7 @@ int __init cio_get_iplinfo(struct cio_ip schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id; if (!schid.one) return -ENODEV; - if (stsch(schid, &schib)) + if (stsch_err(schid, &schib)) return -ENODEV; if (schib.pmcw.st != SUBCHANNEL_TYPE_IO) return -ENODEV; diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/s390/cio/css.c linux-2.6.34-rc5-4b402210-rcu/drivers/s390/cio/css.c --- linux-2.6.34-rc5/drivers/s390/cio/css.c 2010-04-23 10:39:54.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/s390/cio/css.c 2010-04-23 10:42:24.000000000 -0700 @@ -870,15 +870,10 @@ static int __init css_bus_init(void) /* Try to enable MSS. */ ret = chsc_enable_facility(CHSC_SDA_OC_MSS); - switch (ret) { - case 0: /* Success. */ - max_ssid = __MAX_SSID; - break; - case -ENOMEM: - goto out; - default: + if (ret) max_ssid = 0; - } + else /* Success. */ + max_ssid = __MAX_SSID; ret = slow_subchannel_init(); if (ret) @@ -1048,6 +1043,11 @@ static int __init channel_subsystem_init } subsys_initcall_sync(channel_subsystem_init_sync); +void channel_subsystem_reinit(void) +{ + chsc_enable_facility(CHSC_SDA_OC_MSS); +} + #ifdef CONFIG_PROC_FS static ssize_t cio_settle_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/s390/cio/device_fsm.c linux-2.6.34-rc5-4b402210-rcu/drivers/s390/cio/device_fsm.c --- linux-2.6.34-rc5/drivers/s390/cio/device_fsm.c 2010-04-23 10:39:54.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/s390/cio/device_fsm.c 2010-04-23 10:42:24.000000000 -0700 @@ -45,7 +45,7 @@ static void ccw_timeout_log(struct ccw_d sch = to_subchannel(cdev->dev.parent); private = to_io_private(sch); orb = &private->orb; - cc = stsch(sch->schid, &schib); + cc = stsch_err(sch->schid, &schib); printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " "device information:\n", get_clock()); diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/s390/scsi/zfcp_fsf.c linux-2.6.34-rc5-4b402210-rcu/drivers/s390/scsi/zfcp_fsf.c --- linux-2.6.34-rc5/drivers/s390/scsi/zfcp_fsf.c 2010-04-23 10:39:54.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/s390/scsi/zfcp_fsf.c 2010-04-23 10:42:24.000000000 -0700 @@ -2105,7 +2105,8 @@ static void zfcp_fsf_req_trace(struct zf blktrc.inb_usage = req->qdio_req.qdio_inb_usage; blktrc.outb_usage = req->qdio_req.qdio_outb_usage; - if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) { + if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA && + !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { blktrc.flags |= ZFCP_BLK_LAT_VALID; blktrc.channel_lat = lat_in->channel_lat * ticks; blktrc.fabric_lat = lat_in->fabric_lat * ticks; @@ -2157,9 +2158,8 @@ static void zfcp_fsf_send_fcp_command_ta fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); - zfcp_fsf_req_trace(req, scpnt); - skip_fsfstatus: + zfcp_fsf_req_trace(req, scpnt); zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req); scpnt->host_scribble = NULL; diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/scsi/be2iscsi/be_mgmt.c linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/be2iscsi/be_mgmt.c --- linux-2.6.34-rc5/drivers/scsi/be2iscsi/be_mgmt.c 2010-04-23 10:39:55.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/be2iscsi/be_mgmt.c 
2010-04-23 10:42:24.000000000 -0700 @@ -169,6 +169,7 @@ unsigned char mgmt_invalidate_icds(struc SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for" "mgmt_invalidate_icds \n"); + spin_unlock(&ctrl->mbox_lock); return -1; } nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/scsi/bnx2i/bnx2i.h linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/bnx2i/bnx2i.h --- linux-2.6.34-rc5/drivers/scsi/bnx2i/bnx2i.h 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/bnx2i/bnx2i.h 2010-04-23 10:42:24.000000000 -0700 @@ -362,6 +362,7 @@ struct bnx2i_hba { u32 num_ccell; int ofld_conns_active; + wait_queue_head_t eh_wait; int max_active_conns; struct iscsi_cid_queue cid_que; @@ -381,6 +382,7 @@ struct bnx2i_hba { spinlock_t lock; /* protects hba structure access */ struct mutex net_dev_lock;/* sync net device access */ + int hba_shutdown_tmo; /* * PCI related info. */ diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/scsi/bnx2i/bnx2i_init.c linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/bnx2i/bnx2i_init.c --- linux-2.6.34-rc5/drivers/scsi/bnx2i/bnx2i_init.c 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/bnx2i/bnx2i_init.c 2010-04-23 10:42:24.000000000 -0700 @@ -177,11 +177,22 @@ void bnx2i_stop(void *handle) struct bnx2i_hba *hba = handle; /* check if cleanup happened in GOING_DOWN context */ - clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) iscsi_host_for_each_session(hba->shost, bnx2i_drop_session); + + /* Wait for all endpoints to be torn down, Chip will be reset once + * control returns to network driver. So it is required to cleanup and + * release all connection resources before returning from this routine. 
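The wait added above is one half of a standard waitqueue handshake: the shutdown path sleeps until the offload connection count reaches zero or the timeout expires, and the endpoint-teardown path (further down in this patch) wakes it. Sketch of both halves:

/* shutdown side: sleep until all offloaded connections are gone */
wait_event_interruptible_timeout(hba->eh_wait,
				 hba->ofld_conns_active == 0,
				 hba->hba_shutdown_tmo);

/* teardown side: after dropping a connection, wake the waiter */
wake_up_interruptible(&hba->eh_wait);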
+ */ + wait_event_interruptible_timeout(hba->eh_wait, + (hba->ofld_conns_active == 0), + hba->hba_shutdown_tmo); + /* This flag should be cleared last so that ep_disconnect() gracefully + * cleans up connection context + */ + clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); } /** diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/scsi/bnx2i/bnx2i_iscsi.c linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/bnx2i/bnx2i_iscsi.c --- linux-2.6.34-rc5/drivers/scsi/bnx2i/bnx2i_iscsi.c 2010-04-23 10:39:55.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/bnx2i/bnx2i_iscsi.c 2010-04-23 10:42:24.000000000 -0700 @@ -820,6 +820,11 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct spin_lock_init(&hba->lock); mutex_init(&hba->net_dev_lock); + init_waitqueue_head(&hba->eh_wait); + if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) + hba->hba_shutdown_tmo = 240 * HZ; + else /* 5706/5708/5709 */ + hba->hba_shutdown_tmo = 30 * HZ; if (iscsi_host_add(shost, &hba->pcidev->dev)) goto free_dump_mem; @@ -1658,8 +1663,8 @@ static struct iscsi_endpoint *bnx2i_ep_c */ hba = bnx2i_check_route(dst_addr); - if (!hba) { - rc = -ENOMEM; + if (!hba || test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) { + rc = -EINVAL; goto check_busy; } @@ -1804,7 +1809,7 @@ static int bnx2i_ep_poll(struct iscsi_en (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)), msecs_to_jiffies(timeout_ms)); - if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED)) + if (bnx2i_ep->state == EP_STATE_OFLD_FAILED) rc = -1; if (rc > 0) @@ -1957,6 +1962,8 @@ return_bnx2i_ep: if (!hba->ofld_conns_active) bnx2i_unreg_dev_all(); + + wake_up_interruptible(&hba->eh_wait); } diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/scsi/dpt_i2o.c linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/dpt_i2o.c --- linux-2.6.34-rc5/drivers/scsi/dpt_i2o.c 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/dpt_i2o.c 2010-04-23 10:42:24.000000000 -0700 @@ -188,7 +188,8 @@ MODULE_DEVICE_TABLE(pci,dptids); static int adpt_detect(struct scsi_host_template* sht) { struct pci_dev *pDev = NULL; - adpt_hba* pHba; + adpt_hba *pHba; + adpt_hba *next; PINFO("Detecting Adaptec I2O RAID controllers...\n"); @@ -206,7 +207,8 @@ static int adpt_detect(struct scsi_host_ } /* In INIT state, Activate IOPs */ - for (pHba = hba_chain; pHba; pHba = pHba->next) { + for (pHba = hba_chain; pHba; pHba = next) { + next = pHba->next; // Activate does get status , init outbound, and get hrt if (adpt_i2o_activate_hba(pHba) < 0) { adpt_i2o_delete_hba(pHba); @@ -243,7 +245,8 @@ rebuild_sys_tab: PDEBUG("HBA's in OPERATIONAL state\n"); printk("dpti: If you have a lot of devices this could take a few minutes.\n"); - for (pHba = hba_chain; pHba; pHba = pHba->next) { + for (pHba = hba_chain; pHba; pHba = next) { + next = pHba->next; printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name); if (adpt_i2o_lct_get(pHba) < 0){ adpt_i2o_delete_hba(pHba); @@ -263,7 +266,8 @@ rebuild_sys_tab: adpt_sysfs_class = NULL; } - for (pHba = hba_chain; pHba; pHba = pHba->next) { + for (pHba = hba_chain; pHba; pHba = next) { + next = pHba->next; if (adpt_scsi_host_alloc(pHba, sht) < 0){ adpt_i2o_delete_hba(pHba); continue; @@ -1229,11 +1233,10 @@ static void adpt_i2o_delete_hba(adpt_hba } } pci_dev_put(pHba->pDev); - kfree(pHba); - if (adpt_sysfs_class) device_destroy(adpt_sysfs_class, MKDEV(DPTI_I2O_MAJOR, pHba->unit)); + kfree(pHba); if(hba_count <= 0){ unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER); diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/scsi/ibmvscsi/ibmvscsi.c 
linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/ibmvscsi/ibmvscsi.c --- linux-2.6.34-rc5/drivers/scsi/ibmvscsi/ibmvscsi.c 2010-04-23 10:39:56.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/ibmvscsi/ibmvscsi.c 2010-04-23 10:42:24.000000000 -0700 @@ -323,16 +323,6 @@ static void set_srp_direction(struct scs srp_cmd->buf_fmt = fmt; } -static void unmap_sg_list(int num_entries, - struct device *dev, - struct srp_direct_buf *md) -{ - int i; - - for (i = 0; i < num_entries; ++i) - dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL); -} - /** * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format * @cmd: srp_cmd whose additional_data member will be unmapped @@ -350,24 +340,9 @@ static void unmap_cmd_data(struct srp_cm if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC) return; - else if (out_fmt == SRP_DATA_DESC_DIRECT || - in_fmt == SRP_DATA_DESC_DIRECT) { - struct srp_direct_buf *data = - (struct srp_direct_buf *) cmd->add_data; - dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL); - } else { - struct srp_indirect_buf *indirect = - (struct srp_indirect_buf *) cmd->add_data; - int num_mapped = indirect->table_desc.len / - sizeof(struct srp_direct_buf); - if (num_mapped <= MAX_INDIRECT_BUFS) { - unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]); - return; - } - - unmap_sg_list(num_mapped, dev, evt_struct->ext_list); - } + if (evt_struct->cmnd) + scsi_dma_unmap(evt_struct->cmnd); } static int map_sg_list(struct scsi_cmnd *cmd, int nseg, diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/scsi/iscsi_tcp.c linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/iscsi_tcp.c --- linux-2.6.34-rc5/drivers/scsi/iscsi_tcp.c 2010-04-23 10:39:56.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/iscsi_tcp.c 2010-04-23 10:42:24.000000000 -0700 @@ -599,7 +599,7 @@ static void iscsi_sw_tcp_conn_stop(struc set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); - if (sock->sk->sk_sleep && waitqueue_active(sock->sk->sk_sleep)) { + if (sock->sk->sk_sleep) { sock->sk->sk_err = EIO; wake_up_interruptible(sock->sk->sk_sleep); } diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/scsi/lpfc/lpfc_bsg.c linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/lpfc/lpfc_bsg.c --- linux-2.6.34-rc5/drivers/scsi/lpfc/lpfc_bsg.c 2010-04-23 10:39:56.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/lpfc/lpfc_bsg.c 2010-04-23 10:42:24.000000000 -0700 @@ -433,7 +433,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba * dd_data = cmdiocbq->context1; /* normal completion and timeout crossed paths, already done */ if (!dd_data) { - spin_unlock_irqrestore(&phba->hbalock, flags); + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); return; } @@ -1196,7 +1196,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *p dd_data = cmdiocbq->context1; /* normal completion and timeout crossed paths, already done */ if (!dd_data) { - spin_unlock_irqrestore(&phba->hbalock, flags); + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); return; } diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/scsi/qla2xxx/qla_attr.c linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/qla2xxx/qla_attr.c --- linux-2.6.34-rc5/drivers/scsi/qla2xxx/qla_attr.c 2010-04-23 10:39:57.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/qla2xxx/qla_attr.c 2010-04-23 10:42:24.000000000 -0700 @@ -2393,6 +2393,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *b return 0; done: + spin_unlock_irqrestore(&ha->hardware_lock, flags); if (bsg_job->request->msgcode == FC_BSG_HST_CT) kfree(sp->fcport); 
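The ibmvscsi hunk above can drop its hand-rolled unmap_sg_list() because scsi_dma_map()/scsi_dma_unmap() already track the scatterlist the midlayer attached to the command, so unmapping only needs the scsi_cmnd itself. Sketch of the pairing (the error handling shown is hypothetical):

int nseg = scsi_dma_map(cmd);	/* cmd: struct scsi_cmnd *; segs, 0, or < 0 */
if (nseg < 0)
	return -EIO;		/* hypothetical error handling */
/* ... build the data descriptors from scsi_sglist(cmd) ... */

/* on completion or error, one call undoes the whole mapping */
scsi_dma_unmap(cmd);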
kfree(sp->ctx); diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/scsi/qla4xxx/ql4_mbx.c linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/qla4xxx/ql4_mbx.c --- linux-2.6.34-rc5/drivers/scsi/qla4xxx/ql4_mbx.c 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/qla4xxx/ql4_mbx.c 2010-04-23 10:42:24.000000000 -0700 @@ -467,7 +467,7 @@ int qla4xxx_get_fwddb_entry(struct scsi_ if (conn_err_detail) *conn_err_detail = mbox_sts[5]; if (tcp_source_port_num) - *tcp_source_port_num = (uint16_t) mbox_sts[6] >> 16; + *tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16); if (connection_id) *connection_id = (uint16_t) mbox_sts[6] & 0x00FF; status = QLA_SUCCESS; diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/scsi/wd7000.c linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/wd7000.c --- linux-2.6.34-rc5/drivers/scsi/wd7000.c 2010-04-23 10:39:58.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/scsi/wd7000.c 2010-04-23 10:42:24.000000000 -0700 @@ -1587,7 +1587,7 @@ static int wd7000_host_reset(struct scsi { Adapter *host = (Adapter *) SCpnt->device->host->hostdata; - spin_unlock_irq(SCpnt->device->host->host_lock); + spin_lock_irq(SCpnt->device->host->host_lock); if (wd7000_adapter_reset(host) < 0) { spin_unlock_irq(SCpnt->device->host->host_lock); diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/serial/mcf.c linux-2.6.34-rc5-4b402210-rcu/drivers/serial/mcf.c --- linux-2.6.34-rc5/drivers/serial/mcf.c 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/serial/mcf.c 2010-04-23 10:42:24.000000000 -0700 @@ -263,6 +263,7 @@ static void mcf_set_termios(struct uart_ } spin_lock_irqsave(&port->lock, flags); + uart_update_timeout(port, termios->c_cflag, baud); writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR); writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR); writeb(MCFUART_UCR_CMDRESETMRPTR, port->membase + MCFUART_UCR); @@ -379,6 +380,7 @@ static irqreturn_t mcf_interrupt(int irq static void mcf_config_port(struct uart_port *port, int flags) { port->type = PORT_MCF; + port->fifosize = MCFUART_TXFIFOSIZE; /* Clear mask, so no surprise interrupts. */ writeb(0, port->membase + MCFUART_UIMR); @@ -424,7 +426,7 @@ static int mcf_verify_port(struct uart_p /* * Define the basic serial functions we support. */ -static struct uart_ops mcf_uart_ops = { +static const struct uart_ops mcf_uart_ops = { .tx_empty = mcf_tx_empty, .get_mctrl = mcf_get_mctrl, .set_mctrl = mcf_set_mctrl, @@ -443,7 +445,7 @@ static struct uart_ops mcf_uart_ops = { .verify_port = mcf_verify_port, }; -static struct mcf_uart mcf_ports[3]; +static struct mcf_uart mcf_ports[4]; #define MCF_MAXPORTS ARRAY_SIZE(mcf_ports) diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/serial/serial_cs.c linux-2.6.34-rc5-4b402210-rcu/drivers/serial/serial_cs.c --- linux-2.6.34-rc5/drivers/serial/serial_cs.c 2010-04-23 10:39:58.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/serial/serial_cs.c 2010-04-23 10:42:24.000000000 -0700 @@ -105,6 +105,10 @@ struct serial_cfg_mem { * manfid 0x0160, 0x0104 * This card appears to have a 14.7456MHz clock. 
*/ +/* Generic Modem: MD55x (GPRS/EDGE) have + * Elan VPU16551 UART with 14.7456MHz oscillator + * manfid 0x015D, 0x4C45 + */ static void quirk_setup_brainboxes_0104(struct pcmcia_device *link, struct uart_port *port) { port->uartclk = 14745600; @@ -196,6 +200,11 @@ static const struct serial_quirk quirks[ .multi = -1, .setup = quirk_setup_brainboxes_0104, }, { + .manfid = 0x015D, + .prodid = 0x4C45, + .multi = -1, + .setup = quirk_setup_brainboxes_0104, + }, { .manfid = MANFID_IBM, .prodid = ~0, .multi = -1, diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/staging/dt3155/dt3155_drv.c linux-2.6.34-rc5-4b402210-rcu/drivers/staging/dt3155/dt3155_drv.c --- linux-2.6.34-rc5/drivers/staging/dt3155/dt3155_drv.c 2010-04-23 10:40:00.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/staging/dt3155/dt3155_drv.c 2010-04-23 10:42:24.000000000 -0700 @@ -57,19 +57,8 @@ MA 02111-1307 USA extern void printques(int); -#ifdef MODULE #include #include - - -MODULE_LICENSE("GPL"); - -#endif - -#ifndef CONFIG_PCI -#error "DT3155 : Kernel PCI support not enabled (DT3155 drive requires PCI)" -#endif - #include #include #include @@ -84,6 +73,9 @@ MODULE_LICENSE("GPL"); #include "dt3155_io.h" #include "allocator.h" + +MODULE_LICENSE("GPL"); + /* Error variable. Zero means no error. */ int dt3155_errno = 0; diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/usb/core/driver.c linux-2.6.34-rc5-4b402210-rcu/drivers/usb/core/driver.c --- linux-2.6.34-rc5/drivers/usb/core/driver.c 2010-04-23 10:40:04.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/usb/core/driver.c 2010-04-23 10:42:24.000000000 -0700 @@ -301,7 +301,7 @@ static int usb_probe_interface(struct de intf->condition = USB_INTERFACE_BINDING; - /* Bound interfaces are initially active. They are + /* Probed interfaces are initially active. They are * runtime-PM-enabled only if the driver has autosuspend support. * They are sensitive to their children's power states. */ @@ -437,11 +437,11 @@ int usb_driver_claim_interface(struct us iface->condition = USB_INTERFACE_BOUND; - /* Bound interfaces are initially active. They are + /* Claimed interfaces are initially inactive (suspended). They are * runtime-PM-enabled only if the driver has autosuspend support. * They are sensitive to their children's power states. 
*/ - pm_runtime_set_active(dev); + pm_runtime_set_suspended(dev); pm_suspend_ignore_children(dev, false); if (driver->supports_autosuspend) pm_runtime_enable(dev); @@ -1170,7 +1170,7 @@ done: static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) { int status = 0; - int i = 0; + int i = 0, n = 0; struct usb_interface *intf; if (udev->state == USB_STATE_NOTATTACHED || @@ -1179,7 +1179,8 @@ static int usb_suspend_both(struct usb_d /* Suspend all the interfaces and then udev itself */ if (udev->actconfig) { - for (; i < udev->actconfig->desc.bNumInterfaces; i++) { + n = udev->actconfig->desc.bNumInterfaces; + for (i = n - 1; i >= 0; --i) { intf = udev->actconfig->interface[i]; status = usb_suspend_interface(udev, intf, msg); if (status != 0) @@ -1192,7 +1193,7 @@ static int usb_suspend_both(struct usb_d /* If the suspend failed, resume interfaces that did get suspended */ if (status != 0) { msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME); - while (--i >= 0) { + while (++i < n) { intf = udev->actconfig->interface[i]; usb_resume_interface(udev, intf, msg, 0); } @@ -1263,13 +1264,47 @@ static int usb_resume_both(struct usb_de return status; } +static void choose_wakeup(struct usb_device *udev, pm_message_t msg) +{ + int w, i; + struct usb_interface *intf; + + /* Remote wakeup is needed only when we actually go to sleep. + * For things like FREEZE and QUIESCE, if the device is already + * autosuspended then its current wakeup setting is okay. + */ + if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) { + if (udev->state != USB_STATE_SUSPENDED) + udev->do_remote_wakeup = 0; + return; + } + + /* If remote wakeup is permitted, see whether any interface drivers + * actually want it. + */ + w = 0; + if (device_may_wakeup(&udev->dev) && udev->actconfig) { + for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { + intf = udev->actconfig->interface[i]; + w |= intf->needs_remote_wakeup; + } + } + + /* If the device is autosuspended with the wrong wakeup setting, + * autoresume now so the setting can be changed. 
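The reworked loops above follow the usual suspend/unwind pattern inside usb_suspend_both(): interfaces are suspended from the last to the first, and if one fails, only the ones already suspended are resumed, walking forward from the point of failure. Condensed sketch of that control flow:

n = udev->actconfig->desc.bNumInterfaces;
for (i = n - 1; i >= 0; --i) {		/* suspend children in reverse */
	status = usb_suspend_interface(udev, udev->actconfig->interface[i], msg);
	if (status != 0)
		break;
}
if (status != 0) {
	msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
	while (++i < n)			/* undo only what was suspended */
		usb_resume_interface(udev, udev->actconfig->interface[i], msg, 0);
}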
+ */ + if (udev->state == USB_STATE_SUSPENDED && w != udev->do_remote_wakeup) + pm_runtime_resume(&udev->dev); + udev->do_remote_wakeup = w; +} + /* The device lock is held by the PM core */ int usb_suspend(struct device *dev, pm_message_t msg) { struct usb_device *udev = to_usb_device(dev); do_unbind_rebind(udev, DO_UNBIND); - udev->do_remote_wakeup = device_may_wakeup(&udev->dev); + choose_wakeup(udev, msg); return usb_suspend_both(udev, msg); } diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/usb/host/ehci.h linux-2.6.34-rc5-4b402210-rcu/drivers/usb/host/ehci.h --- linux-2.6.34-rc5/drivers/usb/host/ehci.h 2010-04-23 10:40:04.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/usb/host/ehci.h 2010-04-23 10:42:24.000000000 -0700 @@ -87,8 +87,9 @@ struct ehci_hcd { /* one per controlle int next_uframe; /* scan periodic, start here */ unsigned periodic_sched; /* periodic activity count */ - /* list of itds completed while clock_frame was still active */ + /* list of itds & sitds completed while clock_frame was still active */ struct list_head cached_itd_list; + struct list_head cached_sitd_list; unsigned clock_frame; /* per root hub port */ @@ -195,7 +196,7 @@ timer_action_done (struct ehci_hcd *ehci clear_bit (action, &ehci->actions); } -static void free_cached_itd_list(struct ehci_hcd *ehci); +static void free_cached_lists(struct ehci_hcd *ehci); /*-------------------------------------------------------------------------*/ diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/usb/host/ehci-hcd.c linux-2.6.34-rc5-4b402210-rcu/drivers/usb/host/ehci-hcd.c --- linux-2.6.34-rc5/drivers/usb/host/ehci-hcd.c 2010-04-23 10:40:04.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/usb/host/ehci-hcd.c 2010-04-23 10:42:24.000000000 -0700 @@ -543,6 +543,7 @@ static int ehci_init(struct usb_hcd *hcd */ ehci->periodic_size = DEFAULT_I_TDPS; INIT_LIST_HEAD(&ehci->cached_itd_list); + INIT_LIST_HEAD(&ehci->cached_sitd_list); if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0) return retval; diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/usb/host/ehci-hub.c linux-2.6.34-rc5-4b402210-rcu/drivers/usb/host/ehci-hub.c --- linux-2.6.34-rc5/drivers/usb/host/ehci-hub.c 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/usb/host/ehci-hub.c 2010-04-23 10:42:24.000000000 -0700 @@ -801,7 +801,7 @@ static int ehci_hub_control ( * this bit; seems too long to spin routinely... 
*/ retval = handshake(ehci, status_reg, - PORT_RESET, 0, 750); + PORT_RESET, 0, 1000); if (retval != 0) { ehci_err (ehci, "port %d reset error %d\n", wIndex + 1, retval); diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/usb/host/ehci-mem.c linux-2.6.34-rc5-4b402210-rcu/drivers/usb/host/ehci-mem.c --- linux-2.6.34-rc5/drivers/usb/host/ehci-mem.c 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/usb/host/ehci-mem.c 2010-04-23 10:42:24.000000000 -0700 @@ -136,7 +136,7 @@ static inline void qh_put (struct ehci_q static void ehci_mem_cleanup (struct ehci_hcd *ehci) { - free_cached_itd_list(ehci); + free_cached_lists(ehci); if (ehci->async) qh_put (ehci->async); ehci->async = NULL; diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/usb/host/ehci-omap.c linux-2.6.34-rc5-4b402210-rcu/drivers/usb/host/ehci-omap.c --- linux-2.6.34-rc5/drivers/usb/host/ehci-omap.c 2010-04-23 10:40:04.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/usb/host/ehci-omap.c 2010-04-23 10:42:24.000000000 -0700 @@ -629,11 +629,13 @@ static int ehci_hcd_omap_probe(struct pl } snprintf(supply, sizeof(supply), "hsusb%d", i); omap->regulator[i] = regulator_get(omap->dev, supply); - if (IS_ERR(omap->regulator[i])) + if (IS_ERR(omap->regulator[i])) { + omap->regulator[i] = NULL; dev_dbg(&pdev->dev, "failed to get ehci port%d regulator\n", i); - else + } else { regulator_enable(omap->regulator[i]); + } } ret = omap_start_ehc(omap, hcd); diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/usb/host/ehci-sched.c linux-2.6.34-rc5-4b402210-rcu/drivers/usb/host/ehci-sched.c --- linux-2.6.34-rc5/drivers/usb/host/ehci-sched.c 2010-04-23 10:40:04.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/usb/host/ehci-sched.c 2010-04-23 10:42:24.000000000 -0700 @@ -510,7 +510,7 @@ static int disable_periodic (struct ehci ehci_writel(ehci, cmd, &ehci->regs->command); /* posted write ... */ - free_cached_itd_list(ehci); + free_cached_lists(ehci); ehci->next_uframe = -1; return 0; @@ -2139,13 +2139,27 @@ sitd_complete ( (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); } iso_stream_put (ehci, stream); - /* OK to recycle this SITD now that its completion callback ran. */ + done: sitd->urb = NULL; - sitd->stream = NULL; - list_move(&sitd->sitd_list, &stream->free_list); - iso_stream_put(ehci, stream); - + if (ehci->clock_frame != sitd->frame) { + /* OK to recycle this SITD now. */ + sitd->stream = NULL; + list_move(&sitd->sitd_list, &stream->free_list); + iso_stream_put(ehci, stream); + } else { + /* HW might remember this SITD, so we can't recycle it yet. + * Move it to a safe place until a new frame starts. + */ + list_move(&sitd->sitd_list, &ehci->cached_sitd_list); + if (stream->refcount == 2) { + /* If iso_stream_put() were called here, stream + * would be freed. Instead, just prevent reuse. 
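The cached_sitd_list machinery above mirrors what the driver already does for itds: a completed sitd whose frame is still the active clock_frame is parked on a per-controller list rather than returned to its stream, and free_cached_lists() hands it back once the frame has advanced. Sketch of the recycle step:

struct ehci_sitd *sitd, *sn;

list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
	struct ehci_iso_stream *stream = sitd->stream;

	sitd->stream = NULL;
	list_move(&sitd->sitd_list, &stream->free_list);
	iso_stream_put(ehci, stream);
}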
+ */ + stream->ep->hcpriv = NULL; + stream->ep = NULL; + } + } return retval; } @@ -2211,9 +2225,10 @@ done: /*-------------------------------------------------------------------------*/ -static void free_cached_itd_list(struct ehci_hcd *ehci) +static void free_cached_lists(struct ehci_hcd *ehci) { struct ehci_itd *itd, *n; + struct ehci_sitd *sitd, *sn; list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) { struct ehci_iso_stream *stream = itd->stream; @@ -2221,6 +2236,13 @@ static void free_cached_itd_list(struct list_move(&itd->itd_list, &stream->free_list); iso_stream_put(ehci, stream); } + + list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) { + struct ehci_iso_stream *stream = sitd->stream; + sitd->stream = NULL; + list_move(&sitd->sitd_list, &stream->free_list); + iso_stream_put(ehci, stream); + } } /*-------------------------------------------------------------------------*/ @@ -2247,7 +2269,7 @@ scan_periodic (struct ehci_hcd *ehci) clock_frame = -1; } if (ehci->clock_frame != clock_frame) { - free_cached_itd_list(ehci); + free_cached_lists(ehci); ehci->clock_frame = clock_frame; } clock %= mod; @@ -2414,7 +2436,7 @@ restart: clock = now; clock_frame = clock >> 3; if (ehci->clock_frame != clock_frame) { - free_cached_itd_list(ehci); + free_cached_lists(ehci); ehci->clock_frame = clock_frame; } } else { diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/usb/host/ohci-da8xx.c linux-2.6.34-rc5-4b402210-rcu/drivers/usb/host/ohci-da8xx.c --- linux-2.6.34-rc5/drivers/usb/host/ohci-da8xx.c 2010-04-23 10:40:04.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/usb/host/ohci-da8xx.c 2010-04-23 10:42:24.000000000 -0700 @@ -23,7 +23,7 @@ #error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX." #endif -#define CFGCHIP2 DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP2_REG) +#define CFGCHIP2 DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG) static struct clk *usb11_clk; static struct clk *usb20_clk; diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/usb/misc/usbsevseg.c linux-2.6.34-rc5-4b402210-rcu/drivers/usb/misc/usbsevseg.c --- linux-2.6.34-rc5/drivers/usb/misc/usbsevseg.c 2010-04-23 10:40:05.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/usb/misc/usbsevseg.c 2010-04-23 10:42:24.000000000 -0700 @@ -49,6 +49,7 @@ struct usb_sevsegdev { u16 textlength; u8 shadow_power; /* for PM */ + u8 has_interface_pm; }; /* sysfs_streq can't replace this completely @@ -68,12 +69,16 @@ static void update_display_powered(struc { int rc; - if (!mydev->shadow_power && mydev->powered) { + if (mydev->powered && !mydev->has_interface_pm) { rc = usb_autopm_get_interface(mydev->intf); if (rc < 0) return; + mydev->has_interface_pm = 1; } + if (mydev->shadow_power != 1) + return; + rc = usb_control_msg(mydev->udev, usb_sndctrlpipe(mydev->udev, 0), 0x12, @@ -86,8 +91,10 @@ static void update_display_powered(struc if (rc < 0) dev_dbg(&mydev->udev->dev, "power retval = %d\n", rc); - if (mydev->shadow_power && !mydev->powered) + if (!mydev->powered && mydev->has_interface_pm) { usb_autopm_put_interface(mydev->intf); + mydev->has_interface_pm = 0; + } } static void update_display_mode(struct usb_sevsegdev *mydev) @@ -351,6 +358,10 @@ static int sevseg_probe(struct usb_inter mydev->intf = interface; usb_set_intfdata(interface, mydev); + /* PM */ + mydev->shadow_power = 1; /* currently active */ + mydev->has_interface_pm = 0; /* have not issued autopm_get */ + /*set defaults */ mydev->textmode = 0x02; /* ascii mode */ mydev->mode_msb = 0x06; /* 6 characters */ diff -urpNa -X dontdiff 
linux-2.6.34-rc5/drivers/usb/serial/pl2303.c linux-2.6.34-rc5-4b402210-rcu/drivers/usb/serial/pl2303.c --- linux-2.6.34-rc5/drivers/usb/serial/pl2303.c 2010-04-23 10:40:05.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/usb/serial/pl2303.c 2010-04-23 10:42:24.000000000 -0700 @@ -97,6 +97,7 @@ static const struct usb_device_id id_tab { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, + { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, { } /* Terminating entry */ }; diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/usb/serial/pl2303.h linux-2.6.34-rc5-4b402210-rcu/drivers/usb/serial/pl2303.h --- linux-2.6.34-rc5/drivers/usb/serial/pl2303.h 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/usb/serial/pl2303.h 2010-04-23 10:42:24.000000000 -0700 @@ -134,3 +134,7 @@ /* Sanwa KB-USB2 multimeter cable (ID: 11ad:0001) */ #define SANWA_VENDOR_ID 0x11ad #define SANWA_PRODUCT_ID 0x0001 + +/* ADLINK ND-6530 RS232,RS485 and RS422 adapter */ +#define ADLINK_VENDOR_ID 0x0b63 +#define ADLINK_ND6530_PRODUCT_ID 0x6530 diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/usb/serial/qcaux.c linux-2.6.34-rc5-4b402210-rcu/drivers/usb/serial/qcaux.c --- linux-2.6.34-rc5/drivers/usb/serial/qcaux.c 2010-04-23 10:40:05.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/usb/serial/qcaux.c 2010-04-23 10:42:24.000000000 -0700 @@ -42,6 +42,14 @@ #define CMOTECH_PRODUCT_CDU550 0x5553 #define CMOTECH_PRODUCT_CDX650 0x6512 +/* LG devices */ +#define LG_VENDOR_ID 0x1004 +#define LG_PRODUCT_VX4400_6000 0x6000 /* VX4400/VX6000/Rumor */ + +/* Sanyo devices */ +#define SANYO_VENDOR_ID 0x0474 +#define SANYO_PRODUCT_KATANA_LX 0x0754 /* SCP-3800 (Katana LX) */ + static struct usb_device_id id_table[] = { { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) }, @@ -51,6 +59,8 @@ static struct usb_device_id id_table[] = { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_ALLTEL, 0xff, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU550, 0xff, 0xff, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/usb/serial/sierra.c linux-2.6.34-rc5-4b402210-rcu/drivers/usb/serial/sierra.c --- linux-2.6.34-rc5/drivers/usb/serial/sierra.c 2010-04-23 10:40:05.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/usb/serial/sierra.c 2010-04-23 10:42:24.000000000 -0700 @@ -230,6 +230,7 @@ static const struct sierra_iface_info di static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */ + { USB_DEVICE(0x03F0, 0x211D) }, /* HP ev2210 a.k.a MC5725 */ { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */ { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/usb/serial/ti_usb_3410_5052.c linux-2.6.34-rc5-4b402210-rcu/drivers/usb/serial/ti_usb_3410_5052.c --- 
linux-2.6.34-rc5/drivers/usb/serial/ti_usb_3410_5052.c 2010-04-23 10:40:05.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/usb/serial/ti_usb_3410_5052.c 2010-04-23 10:42:24.000000000 -0700 @@ -172,7 +172,7 @@ static unsigned int product_5052_count; /* the array dimension is the number of default entries plus */ /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */ /* null entry */ -static struct usb_device_id ti_id_table_3410[10+TI_EXTRA_VID_PID_COUNT+1] = { +static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = { { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, @@ -180,6 +180,9 @@ static struct usb_device_id ti_id_table_ { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, + { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) }, + { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) }, + { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, @@ -192,7 +195,7 @@ static struct usb_device_id ti_id_table_ { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, }; -static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1] = { +static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1] = { { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, @@ -200,6 +203,9 @@ static struct usb_device_id ti_id_table_ { USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) }, + { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) }, + { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) }, + { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) }, @@ -287,6 +293,8 @@ MODULE_FIRMWARE("ti_5052.fw"); MODULE_FIRMWARE("mts_cdma.fw"); MODULE_FIRMWARE("mts_gsm.fw"); MODULE_FIRMWARE("mts_edge.fw"); +MODULE_FIRMWARE("mts_mt9234mu.fw"); +MODULE_FIRMWARE("mts_mt9234zba.fw"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Enable debugging, 0=no, 1=yes"); @@ -1687,6 +1695,7 @@ static int ti_download_firmware(struct t const struct firmware *fw_p; char buf[32]; + dbg("%s\n", __func__); /* try ID specific firmware first, then try generic firmware */ sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor, dev->descriptor.idProduct); @@ -1703,7 +1712,15 @@ static int ti_download_firmware(struct t case MTS_EDGE_PRODUCT_ID: strcpy(buf, "mts_edge.fw"); break; - } + case MTS_MT9234MU_PRODUCT_ID: + strcpy(buf, "mts_mt9234mu.fw"); + break; + case MTS_MT9234ZBA_PRODUCT_ID: + strcpy(buf, "mts_mt9234zba.fw"); + break; + case MTS_MT9234ZBAOLD_PRODUCT_ID: + strcpy(buf, "mts_mt9234zba.fw"); + break; } } if (buf[0] == '\0') { if (tdev->td_is_3410) @@ -1718,7 +1735,7 @@ static int ti_download_firmware(struct t return -ENOENT; } if (fw_p->size > TI_FIRMWARE_BUF_SIZE) { - dev_err(&dev->dev, "%s - firmware too large\n", __func__); + dev_err(&dev->dev, "%s - firmware too large %d \n", __func__, 
fw_p->size); return -ENOENT; } @@ -1730,6 +1747,7 @@ static int ti_download_firmware(struct t status = ti_do_download(dev, pipe, buffer, fw_p->size); kfree(buffer); } else { + dbg("%s ENOMEM\n", __func__); status = -ENOMEM; } release_firmware(fw_p); diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/usb/serial/ti_usb_3410_5052.h linux-2.6.34-rc5-4b402210-rcu/drivers/usb/serial/ti_usb_3410_5052.h --- linux-2.6.34-rc5/drivers/usb/serial/ti_usb_3410_5052.h 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/usb/serial/ti_usb_3410_5052.h 2010-04-23 10:42:24.000000000 -0700 @@ -45,6 +45,9 @@ #define MTS_CDMA_PRODUCT_ID 0xF110 #define MTS_GSM_PRODUCT_ID 0xF111 #define MTS_EDGE_PRODUCT_ID 0xF112 +#define MTS_MT9234MU_PRODUCT_ID 0xF114 +#define MTS_MT9234ZBA_PRODUCT_ID 0xF115 +#define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319 /* Commands */ #define TI_GET_VERSION 0x01 diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/usb/wusbcore/devconnect.c linux-2.6.34-rc5-4b402210-rcu/drivers/usb/wusbcore/devconnect.c --- linux-2.6.34-rc5/drivers/usb/wusbcore/devconnect.c 2010-04-23 10:40:05.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/usb/wusbcore/devconnect.c 2010-04-23 10:42:24.000000000 -0700 @@ -438,7 +438,7 @@ static void __wusbhc_keep_alive(struct w old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr); keep_alives = 0; for (cnt = 0; - keep_alives <= WUIE_ELT_MAX && cnt < wusbhc->ports_max; + keep_alives < WUIE_ELT_MAX && cnt < wusbhc->ports_max; cnt++) { unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout); diff -urpNa -X dontdiff linux-2.6.34-rc5/drivers/virtio/virtio_balloon.c linux-2.6.34-rc5-4b402210-rcu/drivers/virtio/virtio_balloon.c --- linux-2.6.34-rc5/drivers/virtio/virtio_balloon.c 2010-04-23 10:40:07.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/drivers/virtio/virtio_balloon.c 2010-04-23 10:42:24.000000000 -0700 @@ -103,7 +103,8 @@ static void fill_balloon(struct virtio_b num = min(num, ARRAY_SIZE(vb->pfns)); for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) { - struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY); + struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY | + __GFP_NOMEMALLOC | __GFP_NOWARN); if (!page) { if (printk_ratelimit()) dev_printk(KERN_INFO, &vb->vdev->dev, diff -urpNa -X dontdiff linux-2.6.34-rc5/fs/afs/mntpt.c linux-2.6.34-rc5-4b402210-rcu/fs/afs/mntpt.c --- linux-2.6.34-rc5/fs/afs/mntpt.c 2010-04-23 10:40:08.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/fs/afs/mntpt.c 2010-04-23 10:42:24.000000000 -0700 @@ -138,9 +138,9 @@ static struct vfsmount *afs_mntpt_do_aut { struct afs_super_info *super; struct vfsmount *mnt; - struct page *page = NULL; + struct page *page; size_t size; - char *buf, *devname = NULL, *options = NULL; + char *buf, *devname, *options; int ret; _enter("{%s}", mntpt->d_name.name); @@ -150,22 +150,22 @@ static struct vfsmount *afs_mntpt_do_aut ret = -EINVAL; size = mntpt->d_inode->i_size; if (size > PAGE_SIZE - 1) - goto error; + goto error_no_devname; ret = -ENOMEM; devname = (char *) get_zeroed_page(GFP_KERNEL); if (!devname) - goto error; + goto error_no_devname; options = (char *) get_zeroed_page(GFP_KERNEL); if (!options) - goto error; + goto error_no_options; /* read the contents of the AFS special symlink */ page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL); if (IS_ERR(page)) { ret = PTR_ERR(page); - goto error; + goto error_no_page; } ret = -EIO; @@ -196,12 +196,12 @@ static struct vfsmount *afs_mntpt_do_aut return mnt; error: - if (page) - page_cache_release(page); - if 
(devname) - free_page((unsigned long) devname); - if (options) - free_page((unsigned long) options); + page_cache_release(page); +error_no_page: + free_page((unsigned long) options); +error_no_options: + free_page((unsigned long) devname); +error_no_devname: _leave(" = %d", ret); return ERR_PTR(ret); } diff -urpNa -X dontdiff linux-2.6.34-rc5/fs/binfmt_flat.c linux-2.6.34-rc5-4b402210-rcu/fs/binfmt_flat.c --- linux-2.6.34-rc5/fs/binfmt_flat.c 2010-04-23 10:40:08.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/fs/binfmt_flat.c 2010-04-23 10:42:24.000000000 -0700 @@ -355,7 +355,7 @@ calc_reloc(unsigned long r, struct lib_i if (!flat_reloc_valid(r, start_brk - start_data + text_len)) { printk("BINFMT_FLAT: reloc outside program 0x%x (0 - 0x%x/0x%x)", - (int) r,(int)(start_brk-start_code),(int)text_len); + (int) r,(int)(start_brk-start_data+text_len),(int)text_len); goto failed; } diff -urpNa -X dontdiff linux-2.6.34-rc5/fs/jfs/inode.c linux-2.6.34-rc5-4b402210-rcu/fs/jfs/inode.c --- linux-2.6.34-rc5/fs/jfs/inode.c 2010-04-23 10:40:11.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/fs/jfs/inode.c 2010-04-23 10:42:24.000000000 -0700 @@ -61,7 +61,7 @@ struct inode *jfs_iget(struct super_bloc inode->i_op = &page_symlink_inode_operations; inode->i_mapping->a_ops = &jfs_aops; } else { - inode->i_op = &jfs_symlink_inode_operations; + inode->i_op = &jfs_fast_symlink_inode_operations; /* * The inline data should be null-terminated, but * don't let on-disk corruption crash the kernel diff -urpNa -X dontdiff linux-2.6.34-rc5/fs/jfs/jfs_dmap.c linux-2.6.34-rc5-4b402210-rcu/fs/jfs/jfs_dmap.c --- linux-2.6.34-rc5/fs/jfs/jfs_dmap.c 2010-04-23 10:40:11.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/fs/jfs/jfs_dmap.c 2010-04-23 10:42:24.000000000 -0700 @@ -196,7 +196,7 @@ int dbMount(struct inode *ipbmap) bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag); bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref); bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel); - bmp->db_agheigth = le32_to_cpu(dbmp_le->dn_agheigth); + bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight); bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth); bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart); bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size); @@ -288,7 +288,7 @@ int dbSync(struct inode *ipbmap) dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag); dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref); dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel); - dbmp_le->dn_agheigth = cpu_to_le32(bmp->db_agheigth); + dbmp_le->dn_agheight = cpu_to_le32(bmp->db_agheight); dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth); dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart); dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size); @@ -1441,7 +1441,7 @@ dbAllocAG(struct bmap * bmp, int agno, s * tree index of this allocation group within the control page. */ agperlev = - (1 << (L2LPERCTL - (bmp->db_agheigth << 1))) / bmp->db_agwidth; + (1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth; ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1)); /* dmap control page trees fan-out by 4 and a single allocation @@ -1460,7 +1460,7 @@ dbAllocAG(struct bmap * bmp, int agno, s * the subtree to find the leftmost leaf that describes this * free space. 
*/ - for (k = bmp->db_agheigth; k > 0; k--) { + for (k = bmp->db_agheight; k > 0; k--) { for (n = 0, m = (ti << 2) + 1; n < 4; n++) { if (l2nb <= dcp->stree[m + n]) { ti = m + n; @@ -3607,7 +3607,7 @@ void dbFinalizeBmap(struct inode *ipbmap } /* - * compute db_aglevel, db_agheigth, db_width, db_agstart: + * compute db_aglevel, db_agheight, db_width, db_agstart: * an ag is covered in aglevel dmapctl summary tree, * at agheight level height (from leaf) with agwidth number of nodes * each, which starts at agstart index node of the smmary tree node @@ -3616,9 +3616,9 @@ void dbFinalizeBmap(struct inode *ipbmap bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize); l2nl = bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL); - bmp->db_agheigth = l2nl >> 1; - bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheigth << 1)); - for (i = 5 - bmp->db_agheigth, bmp->db_agstart = 0, n = 1; i > 0; + bmp->db_agheight = l2nl >> 1; + bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheight << 1)); + for (i = 5 - bmp->db_agheight, bmp->db_agstart = 0, n = 1; i > 0; i--) { bmp->db_agstart += n; n <<= 2; diff -urpNa -X dontdiff linux-2.6.34-rc5/fs/jfs/jfs_dmap.h linux-2.6.34-rc5-4b402210-rcu/fs/jfs/jfs_dmap.h --- linux-2.6.34-rc5/fs/jfs/jfs_dmap.h 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/fs/jfs/jfs_dmap.h 2010-04-23 10:42:24.000000000 -0700 @@ -210,7 +210,7 @@ struct dbmap_disk { __le32 dn_maxag; /* 4: max active alloc group number */ __le32 dn_agpref; /* 4: preferred alloc group (hint) */ __le32 dn_aglevel; /* 4: dmapctl level holding the AG */ - __le32 dn_agheigth; /* 4: height in dmapctl of the AG */ + __le32 dn_agheight; /* 4: height in dmapctl of the AG */ __le32 dn_agwidth; /* 4: width in dmapctl of the AG */ __le32 dn_agstart; /* 4: start tree index at AG height */ __le32 dn_agl2size; /* 4: l2 num of blks per alloc group */ @@ -229,7 +229,7 @@ struct dbmap { int dn_maxag; /* max active alloc group number */ int dn_agpref; /* preferred alloc group (hint) */ int dn_aglevel; /* dmapctl level holding the AG */ - int dn_agheigth; /* height in dmapctl of the AG */ + int dn_agheight; /* height in dmapctl of the AG */ int dn_agwidth; /* width in dmapctl of the AG */ int dn_agstart; /* start tree index at AG height */ int dn_agl2size; /* l2 num of blks per alloc group */ @@ -255,7 +255,7 @@ struct bmap { #define db_agsize db_bmap.dn_agsize #define db_agl2size db_bmap.dn_agl2size #define db_agwidth db_bmap.dn_agwidth -#define db_agheigth db_bmap.dn_agheigth +#define db_agheight db_bmap.dn_agheight #define db_agstart db_bmap.dn_agstart #define db_numag db_bmap.dn_numag #define db_maxlevel db_bmap.dn_maxlevel diff -urpNa -X dontdiff linux-2.6.34-rc5/fs/jfs/jfs_inode.h linux-2.6.34-rc5-4b402210-rcu/fs/jfs/jfs_inode.h --- linux-2.6.34-rc5/fs/jfs/jfs_inode.h 2010-04-23 10:40:12.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/fs/jfs/jfs_inode.h 2010-04-23 10:42:24.000000000 -0700 @@ -48,5 +48,6 @@ extern const struct file_operations jfs_ extern const struct inode_operations jfs_file_inode_operations; extern const struct file_operations jfs_file_operations; extern const struct inode_operations jfs_symlink_inode_operations; +extern const struct inode_operations jfs_fast_symlink_inode_operations; extern const struct dentry_operations jfs_ci_dentry_operations; #endif /* _H_JFS_INODE */ diff -urpNa -X dontdiff linux-2.6.34-rc5/fs/jfs/namei.c linux-2.6.34-rc5-4b402210-rcu/fs/jfs/namei.c --- linux-2.6.34-rc5/fs/jfs/namei.c 2010-04-23 10:40:12.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/fs/jfs/namei.c 
2010-04-23 10:42:24.000000000 -0700 @@ -956,7 +956,7 @@ static int jfs_symlink(struct inode *dip */ if (ssize <= IDATASIZE) { - ip->i_op = &jfs_symlink_inode_operations; + ip->i_op = &jfs_fast_symlink_inode_operations; i_fastsymlink = JFS_IP(ip)->i_inline; memcpy(i_fastsymlink, name, ssize); @@ -978,7 +978,7 @@ static int jfs_symlink(struct inode *dip else { jfs_info("jfs_symlink: allocate extent ip:0x%p", ip); - ip->i_op = &page_symlink_inode_operations; + ip->i_op = &jfs_symlink_inode_operations; ip->i_mapping->a_ops = &jfs_aops; /* diff -urpNa -X dontdiff linux-2.6.34-rc5/fs/jfs/resize.c linux-2.6.34-rc5-4b402210-rcu/fs/jfs/resize.c --- linux-2.6.34-rc5/fs/jfs/resize.c 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/fs/jfs/resize.c 2010-04-23 10:42:24.000000000 -0700 @@ -81,6 +81,7 @@ int jfs_extendfs(struct super_block *sb, struct inode *iplist[1]; struct jfs_superblock *j_sb, *j_sb2; uint old_agsize; + int agsizechanged = 0; struct buffer_head *bh, *bh2; /* If the volume hasn't grown, get out now */ @@ -333,6 +334,9 @@ int jfs_extendfs(struct super_block *sb, */ if ((rc = dbExtendFS(ipbmap, XAddress, nblocks))) goto error_out; + + agsizechanged |= (bmp->db_agsize != old_agsize); + /* * the map now has extended to cover additional nblocks: * dn_mapsize = oldMapsize + nblocks; @@ -432,7 +436,7 @@ int jfs_extendfs(struct super_block *sb, * will correctly identify the new ag); */ /* if new AG size the same as old AG size, done! */ - if (bmp->db_agsize != old_agsize) { + if (agsizechanged) { if ((rc = diExtendFS(ipimap, ipbmap))) goto error_out; diff -urpNa -X dontdiff linux-2.6.34-rc5/fs/jfs/symlink.c linux-2.6.34-rc5-4b402210-rcu/fs/jfs/symlink.c --- linux-2.6.34-rc5/fs/jfs/symlink.c 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/fs/jfs/symlink.c 2010-04-23 10:42:24.000000000 -0700 @@ -29,9 +29,21 @@ static void *jfs_follow_link(struct dent return NULL; } -const struct inode_operations jfs_symlink_inode_operations = { +const struct inode_operations jfs_fast_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = jfs_follow_link, + .setattr = jfs_setattr, + .setxattr = jfs_setxattr, + .getxattr = jfs_getxattr, + .listxattr = jfs_listxattr, + .removexattr = jfs_removexattr, +}; + +const struct inode_operations jfs_symlink_inode_operations = { + .readlink = generic_readlink, + .follow_link = page_follow_link_light, + .put_link = page_put_link, + .setattr = jfs_setattr, .setxattr = jfs_setxattr, .getxattr = jfs_getxattr, .listxattr = jfs_listxattr, diff -urpNa -X dontdiff linux-2.6.34-rc5/fs/logfs/gc.c linux-2.6.34-rc5-4b402210-rcu/fs/logfs/gc.c --- linux-2.6.34-rc5/fs/logfs/gc.c 2010-04-23 10:40:12.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/fs/logfs/gc.c 2010-04-23 10:42:24.000000000 -0700 @@ -459,6 +459,14 @@ static void __logfs_gc_pass(struct super struct logfs_block *block; int round, progress, last_progress = 0; + /* + * Doing too many changes to the segfile at once would result + * in a large number of aliases. Write the journal before + * things get out of hand. 
+ */ + if (super->s_shadow_tree.no_shadowed_segments >= MAX_OBJ_ALIASES) + logfs_write_anchor(sb); + if (no_free_segments(sb) >= target && super->s_no_object_aliases < MAX_OBJ_ALIASES) return; diff -urpNa -X dontdiff linux-2.6.34-rc5/fs/logfs/journal.c linux-2.6.34-rc5-4b402210-rcu/fs/logfs/journal.c --- linux-2.6.34-rc5/fs/logfs/journal.c 2010-04-23 10:40:12.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/fs/logfs/journal.c 2010-04-23 10:42:24.000000000 -0700 @@ -389,7 +389,10 @@ static void journal_get_erase_count(stru static int journal_erase_segment(struct logfs_area *area) { struct super_block *sb = area->a_sb; - struct logfs_segment_header sh; + union { + struct logfs_segment_header sh; + unsigned char c[ALIGN(sizeof(struct logfs_segment_header), 16)]; + } u; u64 ofs; int err; @@ -397,20 +400,21 @@ static int journal_erase_segment(struct if (err) return err; - sh.pad = 0; - sh.type = SEG_JOURNAL; - sh.level = 0; - sh.segno = cpu_to_be32(area->a_segno); - sh.ec = cpu_to_be32(area->a_erase_count); - sh.gec = cpu_to_be64(logfs_super(sb)->s_gec); - sh.crc = logfs_crc32(&sh, sizeof(sh), 4); + memset(&u, 0, sizeof(u)); + u.sh.pad = 0; + u.sh.type = SEG_JOURNAL; + u.sh.level = 0; + u.sh.segno = cpu_to_be32(area->a_segno); + u.sh.ec = cpu_to_be32(area->a_erase_count); + u.sh.gec = cpu_to_be64(logfs_super(sb)->s_gec); + u.sh.crc = logfs_crc32(&u.sh, sizeof(u.sh), 4); /* This causes a bug in segment.c. Not yet. */ //logfs_set_segment_erased(sb, area->a_segno, area->a_erase_count, 0); ofs = dev_ofs(sb, area->a_segno, 0); - area->a_used_bytes = ALIGN(sizeof(sh), 16); - logfs_buf_write(area, ofs, &sh, sizeof(sh)); + area->a_used_bytes = sizeof(u); + logfs_buf_write(area, ofs, &u, sizeof(u)); return 0; } @@ -494,6 +498,8 @@ static void account_shadows(struct super btree_grim_visitor64(&tree->new, (unsigned long)sb, account_shadow); btree_grim_visitor64(&tree->old, (unsigned long)sb, account_shadow); + btree_grim_visitor32(&tree->segment_map, 0, NULL); + tree->no_shadowed_segments = 0; if (li->li_block) { /* @@ -607,9 +613,9 @@ static size_t __logfs_write_je(struct su if (len == 0) return logfs_write_header(super, header, 0, type); + BUG_ON(len > sb->s_blocksize); compr_len = logfs_compress(buf, data, len, sb->s_blocksize); if (compr_len < 0 || type == JE_ANCHOR) { - BUG_ON(len > sb->s_blocksize); memcpy(data, buf, len); compr_len = len; compr = COMPR_NONE; @@ -661,6 +667,7 @@ static int logfs_write_je_buf(struct sup if (ofs < 0) return ofs; logfs_buf_write(area, ofs, super->s_compressed_je, len); + BUG_ON(super->s_no_je >= MAX_JOURNAL_ENTRIES); super->s_je_array[super->s_no_je++] = cpu_to_be64(ofs); return 0; } diff -urpNa -X dontdiff linux-2.6.34-rc5/fs/logfs/logfs.h linux-2.6.34-rc5-4b402210-rcu/fs/logfs/logfs.h --- linux-2.6.34-rc5/fs/logfs/logfs.h 2010-04-23 10:40:12.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/fs/logfs/logfs.h 2010-04-23 10:42:24.000000000 -0700 @@ -257,10 +257,14 @@ struct logfs_shadow { * struct shadow_tree * @new: shadows where old_ofs==0, indexed by new_ofs * @old: shadows where old_ofs!=0, indexed by old_ofs + * @segment_map: bitfield of segments containing shadows + * @no_shadowed_segment: number of segments containing shadows */ struct shadow_tree { struct btree_head64 new; struct btree_head64 old; + struct btree_head32 segment_map; + int no_shadowed_segments; }; struct object_alias_item { @@ -305,13 +309,14 @@ typedef int write_alias_t(struct super_b level_t level, int child_no, __be64 val); struct logfs_block_ops { void (*write_block)(struct logfs_block 
*block); - gc_level_t (*block_level)(struct logfs_block *block); void (*free_block)(struct super_block *sb, struct logfs_block*block); int (*write_alias)(struct super_block *sb, struct logfs_block *block, write_alias_t *write_one_alias); }; +#define MAX_JOURNAL_ENTRIES 256 + struct logfs_super { struct mtd_info *s_mtd; /* underlying device */ struct block_device *s_bdev; /* underlying device */ @@ -378,7 +383,7 @@ struct logfs_super { u32 s_journal_ec[LOGFS_JOURNAL_SEGS]; /* journal erasecounts */ u64 s_last_version; struct logfs_area *s_journal_area; /* open journal segment */ - __be64 s_je_array[64]; + __be64 s_je_array[MAX_JOURNAL_ENTRIES]; int s_no_je; int s_sum_index; /* for the 12 summaries */ @@ -722,4 +727,10 @@ static inline struct logfs_area *get_are return logfs_super(sb)->s_area[(__force u8)gc_level]; } +static inline void logfs_mempool_destroy(mempool_t *pool) +{ + if (pool) + mempool_destroy(pool); +} + #endif diff -urpNa -X dontdiff linux-2.6.34-rc5/fs/logfs/readwrite.c linux-2.6.34-rc5-4b402210-rcu/fs/logfs/readwrite.c --- linux-2.6.34-rc5/fs/logfs/readwrite.c 2010-04-23 10:40:12.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/fs/logfs/readwrite.c 2010-04-23 10:42:24.000000000 -0700 @@ -430,25 +430,6 @@ static void inode_write_block(struct log } } -static gc_level_t inode_block_level(struct logfs_block *block) -{ - BUG_ON(block->inode->i_ino == LOGFS_INO_MASTER); - return GC_LEVEL(LOGFS_MAX_LEVELS); -} - -static gc_level_t indirect_block_level(struct logfs_block *block) -{ - struct page *page; - struct inode *inode; - u64 bix; - level_t level; - - page = block->page; - inode = page->mapping->host; - logfs_unpack_index(page->index, &bix, &level); - return expand_level(inode->i_ino, level); -} - /* * This silences a false, yet annoying gcc warning. I hate it when my editor * jumps into bitops.h each time I recompile this file. @@ -587,14 +568,12 @@ static void indirect_free_block(struct s static struct logfs_block_ops inode_block_ops = { .write_block = inode_write_block, - .block_level = inode_block_level, .free_block = inode_free_block, .write_alias = inode_write_alias, }; struct logfs_block_ops indirect_block_ops = { .write_block = indirect_write_block, - .block_level = indirect_block_level, .free_block = indirect_free_block, .write_alias = indirect_write_alias, }; @@ -1241,6 +1220,18 @@ static void free_shadow(struct inode *in mempool_free(shadow, super->s_shadow_pool); } +static void mark_segment(struct shadow_tree *tree, u32 segno) +{ + int err; + + if (!btree_lookup32(&tree->segment_map, segno)) { + err = btree_insert32(&tree->segment_map, segno, (void *)1, + GFP_NOFS); + BUG_ON(err); + tree->no_shadowed_segments++; + } +} + /** * fill_shadow_tree - Propagate shadow tree changes due to a write * @inode: Inode owning the page @@ -1288,6 +1279,8 @@ static void fill_shadow_tree(struct inod super->s_dirty_used_bytes += shadow->new_len; super->s_dirty_free_bytes += shadow->old_len; + mark_segment(tree, shadow->old_ofs >> super->s_segshift); + mark_segment(tree, shadow->new_ofs >> super->s_segshift); } } @@ -1845,19 +1838,37 @@ static int __logfs_truncate(struct inode return logfs_truncate_direct(inode, size); } -int logfs_truncate(struct inode *inode, u64 size) +/* + * Truncate, by changing the segment file, can consume a fair amount + * of resources. So back off from time to time and do some GC. + * 8 or 2048 blocks should be well within safety limits even if + * every single block resided in a different segment. 
+ */ +#define TRUNCATE_STEP (8 * 1024 * 1024) +int logfs_truncate(struct inode *inode, u64 target) { struct super_block *sb = inode->i_sb; - int err; + u64 size = i_size_read(inode); + int err = 0; - logfs_get_wblocks(sb, NULL, 1); - err = __logfs_truncate(inode, size); - if (!err) - err = __logfs_write_inode(inode, 0); - logfs_put_wblocks(sb, NULL, 1); + size = ALIGN(size, TRUNCATE_STEP); + while (size > target) { + if (size > TRUNCATE_STEP) + size -= TRUNCATE_STEP; + else + size = 0; + if (size < target) + size = target; + + logfs_get_wblocks(sb, NULL, 1); + err = __logfs_truncate(inode, target); + if (!err) + err = __logfs_write_inode(inode, 0); + logfs_put_wblocks(sb, NULL, 1); + } if (!err) - err = vmtruncate(inode, size); + err = vmtruncate(inode, target); /* I don't trust error recovery yet. */ WARN_ON(err); @@ -2251,8 +2262,6 @@ void logfs_cleanup_rw(struct super_block struct logfs_super *super = logfs_super(sb); destroy_meta_inode(super->s_segfile_inode); - if (super->s_block_pool) - mempool_destroy(super->s_block_pool); - if (super->s_shadow_pool) - mempool_destroy(super->s_shadow_pool); + logfs_mempool_destroy(super->s_block_pool); + logfs_mempool_destroy(super->s_shadow_pool); } diff -urpNa -X dontdiff linux-2.6.34-rc5/fs/logfs/segment.c linux-2.6.34-rc5-4b402210-rcu/fs/logfs/segment.c --- linux-2.6.34-rc5/fs/logfs/segment.c 2010-04-23 10:40:12.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/fs/logfs/segment.c 2010-04-23 10:42:24.000000000 -0700 @@ -183,14 +183,8 @@ static int btree_write_alias(struct supe return 0; } -static gc_level_t btree_block_level(struct logfs_block *block) -{ - return expand_level(block->ino, block->level); -} - static struct logfs_block_ops btree_block_ops = { .write_block = btree_write_block, - .block_level = btree_block_level, .free_block = __free_block, .write_alias = btree_write_alias, }; @@ -919,7 +913,7 @@ err: for (i--; i >= 0; i--) free_area(super->s_area[i]); free_area(super->s_journal_area); - mempool_destroy(super->s_alias_pool); + logfs_mempool_destroy(super->s_alias_pool); return -ENOMEM; } diff -urpNa -X dontdiff linux-2.6.34-rc5/fs/logfs/super.c linux-2.6.34-rc5-4b402210-rcu/fs/logfs/super.c --- linux-2.6.34-rc5/fs/logfs/super.c 2010-04-23 10:40:12.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/fs/logfs/super.c 2010-04-23 10:42:24.000000000 -0700 @@ -12,6 +12,7 @@ #include "logfs.h" #include #include +#include #include #include #include @@ -137,6 +138,10 @@ static int logfs_sb_set(struct super_blo sb->s_fs_info = super; sb->s_mtd = super->s_mtd; sb->s_bdev = super->s_bdev; + if (sb->s_bdev) + sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info; + if (sb->s_mtd) + sb->s_bdi = sb->s_mtd->backing_dev_info; return 0; } @@ -452,6 +457,8 @@ static int logfs_read_sb(struct super_bl btree_init_mempool64(&super->s_shadow_tree.new, super->s_btree_pool); btree_init_mempool64(&super->s_shadow_tree.old, super->s_btree_pool); + btree_init_mempool32(&super->s_shadow_tree.segment_map, + super->s_btree_pool); ret = logfs_init_mapping(sb); if (ret) @@ -516,8 +523,8 @@ static void logfs_kill_sb(struct super_b if (super->s_erase_page) __free_page(super->s_erase_page); super->s_devops->put_device(sb); - mempool_destroy(super->s_btree_pool); - mempool_destroy(super->s_alias_pool); + logfs_mempool_destroy(super->s_btree_pool); + logfs_mempool_destroy(super->s_alias_pool); kfree(super); log_super("LogFS: Finished unmounting\n"); } diff -urpNa -X dontdiff linux-2.6.34-rc5/fs/quota/dquot.c linux-2.6.34-rc5-4b402210-rcu/fs/quota/dquot.c --- 
linux-2.6.34-rc5/fs/quota/dquot.c 2010-04-23 10:40:14.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/fs/quota/dquot.c 2010-04-23 10:42:24.000000000 -0700 @@ -80,8 +80,6 @@ #include -#define __DQUOT_PARANOIA - /* * There are three quota SMP locks. dq_list_lock protects all lists with quotas * and quota formats, dqstats structure containing statistics about the lists @@ -695,7 +693,7 @@ void dqput(struct dquot *dquot) if (!dquot) return; -#ifdef __DQUOT_PARANOIA +#ifdef CONFIG_QUOTA_DEBUG if (!atomic_read(&dquot->dq_count)) { printk("VFS: dqput: trying to free free dquot\n"); printk("VFS: device %s, dquot of %s %d\n", @@ -748,7 +746,7 @@ we_slept: goto we_slept; } atomic_dec(&dquot->dq_count); -#ifdef __DQUOT_PARANOIA +#ifdef CONFIG_QUOTA_DEBUG /* sanity check */ BUG_ON(!list_empty(&dquot->dq_free)); #endif @@ -845,7 +843,7 @@ we_slept: dquot = NULL; goto out; } -#ifdef __DQUOT_PARANOIA +#ifdef CONFIG_QUOTA_DEBUG BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */ #endif out: @@ -874,7 +872,7 @@ static int dqinit_needed(struct inode *i static void add_dquot_ref(struct super_block *sb, int type) { struct inode *inode, *old_inode = NULL; -#ifdef __DQUOT_PARANOIA +#ifdef CONFIG_QUOTA_DEBUG int reserved = 0; #endif @@ -882,7 +880,7 @@ static void add_dquot_ref(struct super_b list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)) continue; -#ifdef __DQUOT_PARANOIA +#ifdef CONFIG_QUOTA_DEBUG if (unlikely(inode_get_rsv_space(inode) > 0)) reserved = 1; #endif @@ -907,7 +905,7 @@ static void add_dquot_ref(struct super_b spin_unlock(&inode_lock); iput(old_inode); -#ifdef __DQUOT_PARANOIA +#ifdef CONFIG_QUOTA_DEBUG if (reserved) { printk(KERN_WARNING "VFS (%s): Writes happened before quota" " was turned on thus quota information is probably " @@ -940,7 +938,7 @@ static int remove_inode_dquot_ref(struct inode->i_dquot[type] = NULL; if (dquot) { if (dqput_blocks(dquot)) { -#ifdef __DQUOT_PARANOIA +#ifdef CONFIG_QUOTA_DEBUG if (atomic_read(&dquot->dq_count) != 1) printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count)); #endif diff -urpNa -X dontdiff linux-2.6.34-rc5/fs/quota/Kconfig linux-2.6.34-rc5-4b402210-rcu/fs/quota/Kconfig --- linux-2.6.34-rc5/fs/quota/Kconfig 2010-04-23 10:40:14.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/fs/quota/Kconfig 2010-04-23 10:42:24.000000000 -0700 @@ -33,6 +33,14 @@ config PRINT_QUOTA_WARNING Note that this behavior is currently deprecated and may go away in future. Please use notification via netlink socket instead. +config QUOTA_DEBUG + bool "Additional quota sanity checks" + depends on QUOTA + default n + help + If you say Y here, quota subsystem will perform some additional + sanity checks of quota internal structures. If unsure, say N. + # Generic support for tree structured quota files. Selected when needed. 
config QUOTA_TREE tristate diff -urpNa -X dontdiff linux-2.6.34-rc5/include/linux/cgroup.h linux-2.6.34-rc5-4b402210-rcu/include/linux/cgroup.h --- linux-2.6.34-rc5/include/linux/cgroup.h 2010-04-23 10:40:15.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/include/linux/cgroup.h 2010-05-08 08:02:22.000000000 -0700 @@ -530,6 +530,7 @@ static inline struct cgroup_subsys_state { return rcu_dereference_check(task->cgroups->subsys[subsys_id], rcu_read_lock_held() || + lockdep_is_held(&task->alloc_lock) || cgroup_lock_is_held()); } diff -urpNa -X dontdiff linux-2.6.34-rc5/include/linux/firewire-cdev.h linux-2.6.34-rc5-4b402210-rcu/include/linux/firewire-cdev.h --- linux-2.6.34-rc5/include/linux/firewire-cdev.h 2010-04-23 10:40:16.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/include/linux/firewire-cdev.h 2010-04-23 10:42:24.000000000 -0700 @@ -17,7 +17,7 @@ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. diff -urpNa -X dontdiff linux-2.6.34-rc5/include/linux/firewire-constants.h linux-2.6.34-rc5-4b402210-rcu/include/linux/firewire-constants.h --- linux-2.6.34-rc5/include/linux/firewire-constants.h 2010-04-23 10:40:16.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/include/linux/firewire-constants.h 2010-04-23 10:42:24.000000000 -0700 @@ -17,7 +17,7 @@ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
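The cgroup.h hunk above and the rcupdate.h and srcu.h hunks that follow all apply the same lockdep-checked RCU dereference pattern. A minimal sketch of that pattern, separate from the patch itself and using an invented struct, lock, and accessor purely for illustration, looks roughly like this:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

/* Invented example structure; "barp" is protected by RCU or by foo_lock. */
struct bar;
struct foo {
	struct bar *barp;
};

static DEFINE_SPINLOCK(foo_lock);

static struct bar *foo_get_bar(struct foo *f)
{
	/*
	 * Legal when called under rcu_read_lock() or with foo_lock held;
	 * any other caller makes lockdep_rcu_dereference() print the
	 * "suspicious rcu_dereference_check() usage" splat seen in the
	 * kernel/lockdep.c hunk further down.
	 */
	return rcu_dereference_check(f->barp,
				     rcu_read_lock_held() ||
				     lockdep_is_held(&foo_lock));
}

The condition argument names every locking context that legitimizes the access, so PROVE_RCU only complains about genuinely unprotected readers.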
diff -urpNa -X dontdiff linux-2.6.34-rc5/include/linux/kvm_host.h linux-2.6.34-rc5-4b402210-rcu/include/linux/kvm_host.h --- linux-2.6.34-rc5/include/linux/kvm_host.h 2010-04-23 10:40:16.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/include/linux/kvm_host.h 2010-04-23 10:42:24.000000000 -0700 @@ -54,7 +54,7 @@ extern struct kmem_cache *kvm_vcpu_cache */ struct kvm_io_bus { int dev_count; -#define NR_IOBUS_DEVS 6 +#define NR_IOBUS_DEVS 200 struct kvm_io_device *devs[NR_IOBUS_DEVS]; }; @@ -119,6 +119,11 @@ struct kvm_memory_slot { int user_alloc; }; +static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) +{ + return ALIGN(memslot->npages, BITS_PER_LONG) / 8; +} + struct kvm_kernel_irq_routing_entry { u32 gsi; u32 type; diff -urpNa -X dontdiff linux-2.6.34-rc5/include/linux/rcupdate.h linux-2.6.34-rc5-4b402210-rcu/include/linux/rcupdate.h --- linux-2.6.34-rc5/include/linux/rcupdate.h 2010-04-23 10:40:17.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/include/linux/rcupdate.h 2010-05-08 08:02:22.000000000 -0700 @@ -56,8 +56,6 @@ struct rcu_head { }; /* Exported common interfaces */ -extern void synchronize_rcu_bh(void); -extern void synchronize_sched(void); extern void rcu_barrier(void); extern void rcu_barrier_bh(void); extern void rcu_barrier_sched(void); @@ -66,8 +64,6 @@ extern int sched_expedited_torture_stats /* Internal to kernel */ extern void rcu_init(void); -extern int rcu_scheduler_active; -extern void rcu_scheduler_starting(void); #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) #include @@ -106,12 +102,13 @@ extern int debug_lockdep_rcu_enabled(voi /** * rcu_read_lock_held - might we be in RCU read-side critical section? * - * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in - * an RCU read-side critical section. In absence of CONFIG_PROVE_LOCKING, + * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU + * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, * this assumes we are in an RCU read-side critical section unless it can * prove otherwise. * - * Check rcu_scheduler_active to prevent false positives during boot. + * Check debug_lockdep_rcu_enabled() to prevent false positives during boot + * and while lockdep is disabled. */ static inline int rcu_read_lock_held(void) { @@ -129,13 +126,15 @@ extern int rcu_read_lock_bh_held(void); /** * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section? * - * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an - * RCU-sched read-side critical section. In absence of CONFIG_PROVE_LOCKING, - * this assumes we are in an RCU-sched read-side critical section unless it - * can prove otherwise. Note that disabling of preemption (including - * disabling irqs) counts as an RCU-sched read-side critical section. + * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an + * RCU-sched read-side critical section. In absence of + * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side + * critical section unless it can prove otherwise. Note that disabling + * of preemption (including disabling irqs) counts as an RCU-sched + * read-side critical section. * - * Check rcu_scheduler_active to prevent false positives during boot. + * Check debug_lockdep_rcu_enabled() to prevent false positives during boot + * and while lockdep is disabled. 
*/ #ifdef CONFIG_PREEMPT static inline int rcu_read_lock_sched_held(void) @@ -177,7 +176,7 @@ static inline int rcu_read_lock_bh_held( #ifdef CONFIG_PREEMPT static inline int rcu_read_lock_sched_held(void) { - return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled(); + return preempt_count() != 0 || irqs_disabled(); } #else /* #ifdef CONFIG_PREEMPT */ static inline int rcu_read_lock_sched_held(void) @@ -190,6 +189,17 @@ static inline int rcu_read_lock_sched_he #ifdef CONFIG_PROVE_RCU +extern int rcu_my_thread_group_empty(void); + +#define __do_rcu_dereference_check(c) \ + do { \ + static bool __warned; \ + if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ + __warned = true; \ + lockdep_rcu_dereference(__FILE__, __LINE__); \ + } \ + } while (0) + /** * rcu_dereference_check - rcu_dereference with debug checking * @p: The pointer to read, prior to dereferencing @@ -219,8 +229,7 @@ static inline int rcu_read_lock_sched_he */ #define rcu_dereference_check(p, c) \ ({ \ - if (debug_lockdep_rcu_enabled() && !(c)) \ - lockdep_rcu_dereference(__FILE__, __LINE__); \ + __do_rcu_dereference_check(c); \ rcu_dereference_raw(p); \ }) @@ -237,8 +246,7 @@ static inline int rcu_read_lock_sched_he */ #define rcu_dereference_protected(p, c) \ ({ \ - if (debug_lockdep_rcu_enabled() && !(c)) \ - lockdep_rcu_dereference(__FILE__, __LINE__); \ + __do_rcu_dereference_check(c); \ (p); \ }) diff -urpNa -X dontdiff linux-2.6.34-rc5/include/linux/rcutiny.h linux-2.6.34-rc5-4b402210-rcu/include/linux/rcutiny.h --- linux-2.6.34-rc5/include/linux/rcutiny.h 2010-04-23 10:40:17.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/include/linux/rcutiny.h 2010-05-08 08:02:22.000000000 -0700 @@ -29,6 +29,10 @@ void rcu_sched_qs(int cpu); void rcu_bh_qs(int cpu); +static inline void rcu_note_context_switch(int cpu) +{ + rcu_sched_qs(cpu); +} #define __rcu_read_lock() preempt_disable() #define __rcu_read_unlock() preempt_enable() @@ -74,7 +78,17 @@ static inline void rcu_sched_force_quies { } -#define synchronize_rcu synchronize_sched +extern void synchronize_sched(void); + +static inline void synchronize_rcu(void) +{ + synchronize_sched(); +} + +static inline void synchronize_rcu_bh(void) +{ + synchronize_sched(); +} static inline void synchronize_rcu_expedited(void) { @@ -114,4 +128,17 @@ static inline int rcu_preempt_depth(void return 0; } +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +extern int rcu_scheduler_active __read_mostly; +extern void rcu_scheduler_starting(void); + +#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +static inline void rcu_scheduler_starting(void) +{ +} + +#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + #endif /* __LINUX_RCUTINY_H */ diff -urpNa -X dontdiff linux-2.6.34-rc5/include/linux/rcutree.h linux-2.6.34-rc5-4b402210-rcu/include/linux/rcutree.h --- linux-2.6.34-rc5/include/linux/rcutree.h 2010-04-23 10:40:17.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/include/linux/rcutree.h 2010-05-08 08:02:22.000000000 -0700 @@ -34,6 +34,7 @@ struct notifier_block; extern void rcu_sched_qs(int cpu); extern void rcu_bh_qs(int cpu); +extern void rcu_note_context_switch(int cpu); extern int rcu_needs_cpu(int cpu); extern int rcu_expedited_torture_stats(char *page); @@ -86,6 +87,8 @@ static inline void __rcu_read_unlock_bh( extern void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); +extern void synchronize_rcu_bh(void); +extern void synchronize_sched(void); extern void synchronize_rcu_expedited(void); static inline void synchronize_rcu_bh_expedited(void) @@ -120,4 +123,7 
@@ static inline int rcu_blocking_is_gp(voi return num_online_cpus() == 1; } +extern void rcu_scheduler_starting(void); +extern int rcu_scheduler_active __read_mostly; + #endif /* __LINUX_RCUTREE_H */ diff -urpNa -X dontdiff linux-2.6.34-rc5/include/linux/regulator/consumer.h linux-2.6.34-rc5-4b402210-rcu/include/linux/regulator/consumer.h --- linux-2.6.34-rc5/include/linux/regulator/consumer.h 2010-04-23 10:40:17.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/include/linux/regulator/consumer.h 2010-04-23 10:42:24.000000000 -0700 @@ -183,9 +183,13 @@ static inline struct regulator *__must_c { /* Nothing except the stubbed out regulator API should be * looking at the value except to check if it is an error - * value so the actual return value doesn't matter. + * value. Drivers are free to handle NULL specifically by + * skipping all regulator API calls, but they don't have to. + * Drivers which don't, should make sure they properly handle + * corner cases of the API, such as regulator_get_voltage() + * returning 0. */ - return (struct regulator *)id; + return NULL; } static inline void regulator_put(struct regulator *regulator) { diff -urpNa -X dontdiff linux-2.6.34-rc5/include/linux/srcu.h linux-2.6.34-rc5-4b402210-rcu/include/linux/srcu.h --- linux-2.6.34-rc5/include/linux/srcu.h 2010-04-23 10:40:17.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/include/linux/srcu.h 2010-05-08 08:02:22.000000000 -0700 @@ -27,6 +27,8 @@ #ifndef _LINUX_SRCU_H #define _LINUX_SRCU_H +#include + struct srcu_struct_array { int c[2]; }; @@ -84,8 +86,8 @@ long srcu_batches_completed(struct srcu_ /** * srcu_read_lock_held - might we be in SRCU read-side critical section? * - * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in - * an SRCU read-side critical section. In absence of CONFIG_PROVE_LOCKING, + * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU + * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, * this assumes we are in an SRCU read-side critical section unless it can * prove otherwise. 
*/ diff -urpNa -X dontdiff linux-2.6.34-rc5/kernel/cgroup.c linux-2.6.34-rc5-4b402210-rcu/kernel/cgroup.c --- linux-2.6.34-rc5/kernel/cgroup.c 2010-04-23 10:40:19.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/kernel/cgroup.c 2010-05-08 08:02:22.000000000 -0700 @@ -1646,7 +1646,9 @@ static inline struct cftype *__d_cft(str int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen) { char *start; - struct dentry *dentry = rcu_dereference(cgrp->dentry); + struct dentry *dentry = rcu_dereference_check(cgrp->dentry, + rcu_read_lock_held() || + cgroup_lock_is_held()); if (!dentry || cgrp == dummytop) { /* @@ -1662,13 +1664,17 @@ int cgroup_path(const struct cgroup *cgr *--start = '\0'; for (;;) { int len = dentry->d_name.len; + if ((start -= len) < buf) return -ENAMETOOLONG; - memcpy(start, cgrp->dentry->d_name.name, len); + memcpy(start, dentry->d_name.name, len); cgrp = cgrp->parent; if (!cgrp) break; - dentry = rcu_dereference(cgrp->dentry); + + dentry = rcu_dereference_check(cgrp->dentry, + rcu_read_lock_held() || + cgroup_lock_is_held()); if (!cgrp->parent) continue; if (--start < buf) @@ -4555,13 +4561,13 @@ static int alloc_css_id(struct cgroup_su { int subsys_id, i, depth = 0; struct cgroup_subsys_state *parent_css, *child_css; - struct css_id *child_id, *parent_id = NULL; + struct css_id *child_id, *parent_id; subsys_id = ss->subsys_id; parent_css = parent->subsys[subsys_id]; child_css = child->subsys[subsys_id]; - depth = css_depth(parent_css) + 1; parent_id = parent_css->id; + depth = parent_id->depth; child_id = get_new_cssid(ss, depth); if (IS_ERR(child_id)) diff -urpNa -X dontdiff linux-2.6.34-rc5/kernel/cgroup_freezer.c linux-2.6.34-rc5-4b402210-rcu/kernel/cgroup_freezer.c --- linux-2.6.34-rc5/kernel/cgroup_freezer.c 2010-04-23 10:40:19.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/kernel/cgroup_freezer.c 2010-05-08 08:02:22.000000000 -0700 @@ -205,9 +205,12 @@ static void freezer_fork(struct cgroup_s * No lock is needed, since the task isn't on tasklist yet, * so it can't be moved to another cgroup, which means the * freezer won't be removed and will be valid during this - * function call. + * function call. Nevertheless, apply RCU read-side critical + * section to suppress RCU lockdep false positives. 
*/ + rcu_read_lock(); freezer = task_freezer(task); + rcu_read_unlock(); /* * The root cgroup is non-freezable, so we can skip the diff -urpNa -X dontdiff linux-2.6.34-rc5/kernel/cred.c linux-2.6.34-rc5-4b402210-rcu/kernel/cred.c --- linux-2.6.34-rc5/kernel/cred.c 2010-04-23 10:40:19.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/kernel/cred.c 2010-04-23 10:42:24.000000000 -0700 @@ -398,6 +398,8 @@ struct cred *prepare_usermodehelper_cred error: put_cred(new); + return NULL; + free_tgcred: #ifdef CONFIG_KEYS kfree(tgcred); @@ -791,8 +793,6 @@ bool creds_are_invalid(const struct cred { if (cred->magic != CRED_MAGIC) return true; - if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers)) - return true; #ifdef CONFIG_SECURITY_SELINUX if (selinux_is_enabled()) { if ((unsigned long) cred->security < PAGE_SIZE) diff -urpNa -X dontdiff linux-2.6.34-rc5/kernel/lockdep.c linux-2.6.34-rc5-4b402210-rcu/kernel/lockdep.c --- linux-2.6.34-rc5/kernel/lockdep.c 2010-04-23 10:40:19.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/kernel/lockdep.c 2010-05-08 08:02:22.000000000 -0700 @@ -3801,8 +3801,11 @@ void lockdep_rcu_dereference(const char { struct task_struct *curr = current; +#ifndef CONFIG_PROVE_RCU_REPEATEDLY if (!debug_locks_off()) return; +#endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */ + /* Note: the following can be executed concurrently, so be careful. */ printk("\n===================================================\n"); printk( "[ INFO: suspicious rcu_dereference_check() usage. ]\n"); printk( "---------------------------------------------------\n"); diff -urpNa -X dontdiff linux-2.6.34-rc5/kernel/rcupdate.c linux-2.6.34-rc5-4b402210-rcu/kernel/rcupdate.c --- linux-2.6.34-rc5/kernel/rcupdate.c 2010-04-23 10:40:19.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/kernel/rcupdate.c 2010-05-08 08:02:22.000000000 -0700 @@ -44,7 +44,6 @@ #include #include #include -#include #include #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -64,9 +63,6 @@ struct lockdep_map rcu_sched_lock_map = EXPORT_SYMBOL_GPL(rcu_sched_lock_map); #endif -int rcu_scheduler_active __read_mostly; -EXPORT_SYMBOL_GPL(rcu_scheduler_active); - #ifdef CONFIG_DEBUG_LOCK_ALLOC int debug_lockdep_rcu_enabled(void) @@ -97,21 +93,6 @@ EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held) #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ /* - * This function is invoked towards the end of the scheduler's initialization - * process. Before this is called, the idle task might contain - * RCU read-side critical sections (during which time, this idle - * task is booting the system). After this function is called, the - * idle tasks are prohibited from containing RCU read-side critical - * sections. - */ -void rcu_scheduler_starting(void) -{ - WARN_ON(num_online_cpus() != 1); - WARN_ON(nr_context_switches() > 0); - rcu_scheduler_active = 1; -} - -/* * Awaken the corresponding synchronize_rcu() instance now that a * grace period has elapsed. */ @@ -122,3 +103,14 @@ void wakeme_after_rcu(struct rcu_head * rcu = container_of(head, struct rcu_synchronize, head); complete(&rcu->completion); } + +#ifdef CONFIG_PROVE_RCU +/* + * wrapper function to avoid #include problems. 
+ */ +int rcu_my_thread_group_empty(void) +{ + return thread_group_empty(current); +} +EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty); +#endif /* #ifdef CONFIG_PROVE_RCU */ diff -urpNa -X dontdiff linux-2.6.34-rc5/kernel/rcutiny.c linux-2.6.34-rc5-4b402210-rcu/kernel/rcutiny.c --- linux-2.6.34-rc5/kernel/rcutiny.c 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/kernel/rcutiny.c 2010-05-08 08:02:22.000000000 -0700 @@ -44,9 +44,9 @@ struct rcu_ctrlblk { }; /* Definition for rcupdate control block. */ -static struct rcu_ctrlblk rcu_ctrlblk = { - .donetail = &rcu_ctrlblk.rcucblist, - .curtail = &rcu_ctrlblk.rcucblist, +static struct rcu_ctrlblk rcu_sched_ctrlblk = { + .donetail = &rcu_sched_ctrlblk.rcucblist, + .curtail = &rcu_sched_ctrlblk.rcucblist, }; static struct rcu_ctrlblk rcu_bh_ctrlblk = { @@ -54,6 +54,11 @@ static struct rcu_ctrlblk rcu_bh_ctrlblk .curtail = &rcu_bh_ctrlblk.rcucblist, }; +#ifdef CONFIG_DEBUG_LOCK_ALLOC +int rcu_scheduler_active __read_mostly; +EXPORT_SYMBOL_GPL(rcu_scheduler_active); +#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + #ifdef CONFIG_NO_HZ static long rcu_dynticks_nesting = 1; @@ -108,7 +113,8 @@ static int rcu_qsctr_help(struct rcu_ctr */ void rcu_sched_qs(int cpu) { - if (rcu_qsctr_help(&rcu_ctrlblk) + rcu_qsctr_help(&rcu_bh_ctrlblk)) + if (rcu_qsctr_help(&rcu_sched_ctrlblk) + + rcu_qsctr_help(&rcu_bh_ctrlblk)) raise_softirq(RCU_SOFTIRQ); } @@ -173,7 +179,7 @@ static void __rcu_process_callbacks(stru */ static void rcu_process_callbacks(struct softirq_action *unused) { - __rcu_process_callbacks(&rcu_ctrlblk); + __rcu_process_callbacks(&rcu_sched_ctrlblk); __rcu_process_callbacks(&rcu_bh_ctrlblk); } @@ -187,7 +193,8 @@ static void rcu_process_callbacks(struct * * Cool, huh? (Due to Josh Triplett.) * - * But we want to make this a static inline later. + * But we want to make this a static inline later. The cond_resched() + * currently makes this problematic. */ void synchronize_sched(void) { @@ -195,12 +202,6 @@ void synchronize_sched(void) } EXPORT_SYMBOL_GPL(synchronize_sched); -void synchronize_rcu_bh(void) -{ - synchronize_sched(); -} -EXPORT_SYMBOL_GPL(synchronize_rcu_bh); - /* * Helper function for call_rcu() and call_rcu_bh(). */ @@ -226,7 +227,7 @@ static void __call_rcu(struct rcu_head * */ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) { - __call_rcu(head, func, &rcu_ctrlblk); + __call_rcu(head, func, &rcu_sched_ctrlblk); } EXPORT_SYMBOL_GPL(call_rcu); @@ -280,3 +281,5 @@ void __init rcu_init(void) { open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); } + +#include "rcutiny_plugin.h" diff -urpNa -X dontdiff linux-2.6.34-rc5/kernel/rcutiny_plugin.h linux-2.6.34-rc5-4b402210-rcu/kernel/rcutiny_plugin.h --- linux-2.6.34-rc5/kernel/rcutiny_plugin.h 1969-12-31 16:00:00.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/kernel/rcutiny_plugin.h 2010-05-08 08:02:22.000000000 -0700 @@ -0,0 +1,39 @@ +/* + * Read-Copy Update mechanism for mutual exclusion (tree-based version) + * Internal non-public definitions that provide either classic + * or preemptable semantics. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright IBM Corporation, 2009 + * + * Author: Paul E. McKenney + */ + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +#include + +/* + * During boot, we forgive RCU lockdep issues. After this function is + * invoked, we start taking RCU lockdep issues seriously. + */ +void rcu_scheduler_starting(void) +{ + WARN_ON(nr_context_switches() > 0); + rcu_scheduler_active = 1; +} + +#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ diff -urpNa -X dontdiff linux-2.6.34-rc5/kernel/rcutree.c linux-2.6.34-rc5-4b402210-rcu/kernel/rcutree.c --- linux-2.6.34-rc5/kernel/rcutree.c 2010-04-23 10:40:19.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/kernel/rcutree.c 2010-05-08 08:02:22.000000000 -0700 @@ -46,6 +46,7 @@ #include #include #include +#include #include "rcutree.h" @@ -53,8 +54,8 @@ static struct lock_class_key rcu_node_class[NUM_RCU_LVLS]; -#define RCU_STATE_INITIALIZER(name) { \ - .level = { &name.node[0] }, \ +#define RCU_STATE_INITIALIZER(structname) { \ + .level = { &structname.node[0] }, \ .levelcnt = { \ NUM_RCU_LVL_0, /* root of hierarchy. */ \ NUM_RCU_LVL_1, \ @@ -65,13 +66,14 @@ static struct lock_class_key rcu_node_cl .signaled = RCU_GP_IDLE, \ .gpnum = -300, \ .completed = -300, \ - .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&name.onofflock), \ + .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname.onofflock), \ .orphan_cbs_list = NULL, \ - .orphan_cbs_tail = &name.orphan_cbs_list, \ + .orphan_cbs_tail = &structname.orphan_cbs_list, \ .orphan_qlen = 0, \ - .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&name.fqslock), \ + .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname.fqslock), \ .n_force_qs = 0, \ .n_force_qs_ngp = 0, \ + .name = #structname, \ } struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state); @@ -80,6 +82,9 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sche struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); +int rcu_scheduler_active __read_mostly; +EXPORT_SYMBOL_GPL(rcu_scheduler_active); + /* * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s * permit this function to be invoked without holding the root rcu_node @@ -97,25 +102,32 @@ static int rcu_gp_in_progress(struct rcu */ void rcu_sched_qs(int cpu) { - struct rcu_data *rdp; + struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu); - rdp = &per_cpu(rcu_sched_data, cpu); rdp->passed_quiesc_completed = rdp->gpnum - 1; barrier(); rdp->passed_quiesc = 1; - rcu_preempt_note_context_switch(cpu); } void rcu_bh_qs(int cpu) { - struct rcu_data *rdp; + struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); - rdp = &per_cpu(rcu_bh_data, cpu); rdp->passed_quiesc_completed = rdp->gpnum - 1; barrier(); rdp->passed_quiesc = 1; } +/* + * Note a context switch. This is a quiescent state for RCU-sched, + * and requires special handling for preemptible RCU. 
+ */ +void rcu_note_context_switch(int cpu) +{ + rcu_sched_qs(cpu); + rcu_preempt_note_context_switch(cpu); +} + #ifdef CONFIG_NO_HZ DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { .dynticks_nesting = 1, @@ -438,6 +450,8 @@ static int rcu_implicit_dynticks_qs(stru #ifdef CONFIG_RCU_CPU_STALL_DETECTOR +int rcu_cpu_stall_panicking __read_mostly; + static void record_gp_stall_check_time(struct rcu_state *rsp) { rsp->gp_start = jiffies; @@ -470,7 +484,8 @@ static void print_other_cpu_stall(struct /* OK, time to rat on our buddy... */ - printk(KERN_ERR "INFO: RCU detected CPU stalls:"); + printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {", + rsp->name); rcu_for_each_leaf_node(rsp, rnp) { raw_spin_lock_irqsave(&rnp->lock, flags); rcu_print_task_stall(rnp); @@ -481,7 +496,7 @@ static void print_other_cpu_stall(struct if (rnp->qsmask & (1UL << cpu)) printk(" %d", rnp->grplo + cpu); } - printk(" (detected by %d, t=%ld jiffies)\n", + printk("} (detected by %d, t=%ld jiffies)\n", smp_processor_id(), (long)(jiffies - rsp->gp_start)); trigger_all_cpu_backtrace(); @@ -497,8 +512,8 @@ static void print_cpu_stall(struct rcu_s unsigned long flags; struct rcu_node *rnp = rcu_get_root(rsp); - printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n", - smp_processor_id(), jiffies - rsp->gp_start); + printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n", + rsp->name, smp_processor_id(), jiffies - rsp->gp_start); trigger_all_cpu_backtrace(); raw_spin_lock_irqsave(&rnp->lock, flags); @@ -515,6 +530,8 @@ static void check_cpu_stall(struct rcu_s long delta; struct rcu_node *rnp; + if (rcu_cpu_stall_panicking) + return; delta = jiffies - rsp->jiffies_stall; rnp = rdp->mynode; if ((rnp->qsmask & rdp->grpmask) && delta >= 0) { @@ -529,6 +546,21 @@ static void check_cpu_stall(struct rcu_s } } +static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) +{ + rcu_cpu_stall_panicking = 1; + return NOTIFY_DONE; +} + +static struct notifier_block rcu_panic_block = { + .notifier_call = rcu_panic, +}; + +static void __init check_cpu_stall_init(void) +{ + atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block); +} + #else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ static void record_gp_stall_check_time(struct rcu_state *rsp) @@ -539,6 +571,10 @@ static void check_cpu_stall(struct rcu_s { } +static void __init check_cpu_stall_init(void) +{ +} + #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ /* @@ -1125,8 +1161,6 @@ static void rcu_do_batch(struct rcu_stat */ void rcu_check_callbacks(int cpu, int user) { - if (!rcu_pending(cpu)) - return; /* if nothing for RCU to do. */ if (user || (idle_cpu(cpu) && rcu_scheduler_active && !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) { @@ -1158,7 +1192,8 @@ void rcu_check_callbacks(int cpu, int us rcu_bh_qs(cpu); } rcu_preempt_check_callbacks(cpu); - raise_softirq(RCU_SOFTIRQ); + if (rcu_pending(cpu)) + raise_softirq(RCU_SOFTIRQ); } #ifdef CONFIG_SMP @@ -1236,11 +1271,11 @@ static void force_quiescent_state(struct break; /* grace period idle or initializing, ignore. */ case RCU_SAVE_DYNTICK: - - raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK) break; /* So gcc recognizes the dead code. */ + raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ + /* Record dyntick-idle state. 
*/ force_qs_rnp(rsp, dyntick_save_progress_counter); raw_spin_lock(&rnp->lock); /* irqs already disabled */ @@ -1498,8 +1533,20 @@ static int __rcu_pending(struct rcu_stat check_cpu_stall(rsp, rdp); /* Is the RCU core waiting for a quiescent state from this CPU? */ - if (rdp->qs_pending) { + if (rdp->qs_pending && !rdp->passed_quiesc) { + + /* + * If force_quiescent_state() coming soon and this CPU + * needs a quiescent state, and this is either RCU-sched + * or RCU-bh, force a local reschedule. + */ rdp->n_rp_qs_pending++; + if (!rdp->preemptable && + ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1, + jiffies)) + set_need_resched(); + } else if (rdp->qs_pending && rdp->passed_quiesc) { + rdp->n_rp_report_qs++; return 1; } @@ -1767,6 +1814,21 @@ static int __cpuinit rcu_cpu_notify(stru } /* + * This function is invoked towards the end of the scheduler's initialization + * process. Before this is called, the idle task might contain + * RCU read-side critical sections (during which time, this idle + * task is booting the system). After this function is called, the + * idle tasks are prohibited from containing RCU read-side critical + * sections. This function also enables RCU lockdep checking. + */ +void rcu_scheduler_starting(void) +{ + WARN_ON(num_online_cpus() != 1); + WARN_ON(nr_context_switches() > 0); + rcu_scheduler_active = 1; +} + +/* * Compute the per-level fanout, either using the exact fanout specified * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT. */ @@ -1849,6 +1911,14 @@ static void __init rcu_init_one(struct r INIT_LIST_HEAD(&rnp->blocked_tasks[3]); } } + + rnp = rsp->level[NUM_RCU_LVLS - 1]; + for_each_possible_cpu(i) { + while (i > rnp->grphi) + rnp++; + rsp->rda[i]->mynode = rnp; + rcu_boot_init_percpu_data(i, rsp); + } } /* @@ -1859,19 +1929,11 @@ static void __init rcu_init_one(struct r #define RCU_INIT_FLAVOR(rsp, rcu_data) \ do { \ int i; \ - int j; \ - struct rcu_node *rnp; \ \ - rcu_init_one(rsp); \ - rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \ - j = 0; \ for_each_possible_cpu(i) { \ - if (i > rnp[j].grphi) \ - j++; \ - per_cpu(rcu_data, i).mynode = &rnp[j]; \ (rsp)->rda[i] = &per_cpu(rcu_data, i); \ - rcu_boot_init_percpu_data(i, rsp); \ } \ + rcu_init_one(rsp); \ } while (0) void __init rcu_init(void) @@ -1879,12 +1941,6 @@ void __init rcu_init(void) int cpu; rcu_bootup_announce(); -#ifdef CONFIG_RCU_CPU_STALL_DETECTOR - printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); -#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ -#if NUM_RCU_LVL_4 != 0 - printk(KERN_INFO "Experimental four-level hierarchy is enabled.\n"); -#endif /* #if NUM_RCU_LVL_4 != 0 */ RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); __rcu_init_preempt(); @@ -1898,6 +1954,7 @@ void __init rcu_init(void) cpu_notifier(rcu_cpu_notify, 0); for_each_online_cpu(cpu) rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); + check_cpu_stall_init(); } #include "rcutree_plugin.h" diff -urpNa -X dontdiff linux-2.6.34-rc5/kernel/rcutree.h linux-2.6.34-rc5-4b402210-rcu/kernel/rcutree.h --- linux-2.6.34-rc5/kernel/rcutree.h 2010-04-23 10:40:19.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/kernel/rcutree.h 2010-05-08 08:02:22.000000000 -0700 @@ -223,6 +223,7 @@ struct rcu_data { /* 5) __rcu_pending() statistics. */ unsigned long n_rcu_pending; /* rcu_pending() calls since boot. 
*/ unsigned long n_rp_qs_pending; + unsigned long n_rp_report_qs; unsigned long n_rp_cb_ready; unsigned long n_rp_cpu_needs_gp; unsigned long n_rp_gp_completed; @@ -326,6 +327,7 @@ struct rcu_state { unsigned long jiffies_stall; /* Time at which to check */ /* for CPU stalls. */ #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ + char *name; /* Name of structure. */ }; /* Return values for rcu_preempt_offline_tasks(). */ diff -urpNa -X dontdiff linux-2.6.34-rc5/kernel/rcutree_plugin.h linux-2.6.34-rc5-4b402210-rcu/kernel/rcutree_plugin.h --- linux-2.6.34-rc5/kernel/rcutree_plugin.h 2010-04-23 10:40:19.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/kernel/rcutree_plugin.h 2010-05-08 08:02:22.000000000 -0700 @@ -26,6 +26,45 @@ #include +/* + * Check the RCU kernel configuration parameters and print informative + * messages about anything out of the ordinary. If you like #ifdef, you + * will love this function. + */ +static void __init rcu_bootup_announce_oddness(void) +{ +#ifdef CONFIG_RCU_TRACE + printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n"); +#endif +#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32) + printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n", + CONFIG_RCU_FANOUT); +#endif +#ifdef CONFIG_RCU_FANOUT_EXACT + printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n"); +#endif +#ifdef CONFIG_RCU_FAST_NO_HZ + printk(KERN_INFO + "\tRCU dyntick-idle grace-period acceleration is enabled.\n"); +#endif +#ifdef CONFIG_PROVE_RCU + printk(KERN_INFO "\tRCU lockdep checking is enabled.\n"); +#endif +#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE + printk(KERN_INFO "\tRCU torture testing starts during boot.\n"); +#endif +#ifndef CONFIG_RCU_CPU_STALL_DETECTOR + printk(KERN_INFO + "\tRCU-based detection of stalled CPUs is disabled.\n"); +#endif +#ifndef CONFIG_RCU_CPU_STALL_VERBOSE + printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n"); +#endif +#if NUM_RCU_LVL_4 != 0 + printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n"); +#endif +} + #ifdef CONFIG_TREE_PREEMPT_RCU struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); @@ -38,8 +77,8 @@ static int rcu_preempted_readers_exp(str */ static void __init rcu_bootup_announce(void) { - printk(KERN_INFO - "Experimental preemptable hierarchical RCU implementation.\n"); + printk(KERN_INFO "Preemptable hierarchical RCU implementation.\n"); + rcu_bootup_announce_oddness(); } /* @@ -75,13 +114,19 @@ EXPORT_SYMBOL_GPL(rcu_force_quiescent_st * that this just means that the task currently running on the CPU is * not in a quiescent state. There might be any number of tasks blocked * while in an RCU read-side critical section. + * + * Unlike the other rcu_*_qs() functions, callers to this function + * must disable irqs in order to protect the assignment to + * ->rcu_read_unlock_special. */ static void rcu_preempt_qs(int cpu) { struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); + rdp->passed_quiesc_completed = rdp->gpnum - 1; barrier(); rdp->passed_quiesc = 1; + current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; } /* @@ -144,9 +189,8 @@ static void rcu_preempt_note_context_swi * grace period, then the fact that the task has been enqueued * means that we continue to block the current grace period. 
*/ - rcu_preempt_qs(cpu); local_irq_save(flags); - t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; + rcu_preempt_qs(cpu); local_irq_restore(flags); } @@ -236,7 +280,6 @@ static void rcu_read_unlock_special(stru */ special = t->rcu_read_unlock_special; if (special & RCU_READ_UNLOCK_NEED_QS) { - t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; rcu_preempt_qs(smp_processor_id()); } @@ -473,7 +516,6 @@ static void rcu_preempt_check_callbacks( struct task_struct *t = current; if (t->rcu_read_lock_nesting == 0) { - t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; rcu_preempt_qs(cpu); return; } @@ -754,6 +796,7 @@ void exit_rcu(void) static void __init rcu_bootup_announce(void) { printk(KERN_INFO "Hierarchical RCU implementation.\n"); + rcu_bootup_announce_oddness(); } /* @@ -1008,6 +1051,8 @@ static DEFINE_PER_CPU(unsigned long, rcu int rcu_needs_cpu(int cpu) { int c = 0; + int snap; + int snap_nmi; int thatcpu; /* Check for being in the holdoff period. */ @@ -1015,12 +1060,18 @@ int rcu_needs_cpu(int cpu) return rcu_needs_cpu_quick_check(cpu); /* Don't bother unless we are the last non-dyntick-idle CPU. */ - for_each_cpu_not(thatcpu, nohz_cpu_mask) - if (thatcpu != cpu) { + for_each_online_cpu(thatcpu) { + if (thatcpu == cpu) + continue; + snap = per_cpu(rcu_dynticks, thatcpu).dynticks; + snap_nmi = per_cpu(rcu_dynticks, thatcpu).dynticks_nmi; + smp_mb(); /* Order sampling of snap with end of grace period. */ + if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) { per_cpu(rcu_dyntick_drain, cpu) = 0; per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; return rcu_needs_cpu_quick_check(cpu); } + } /* Check and update the rcu_dyntick_drain sequencing. */ if (per_cpu(rcu_dyntick_drain, cpu) <= 0) { diff -urpNa -X dontdiff linux-2.6.34-rc5/kernel/rcutree_trace.c linux-2.6.34-rc5-4b402210-rcu/kernel/rcutree_trace.c --- linux-2.6.34-rc5/kernel/rcutree_trace.c 2010-04-23 10:40:19.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/kernel/rcutree_trace.c 2010-05-08 08:02:22.000000000 -0700 @@ -241,11 +241,13 @@ static const struct file_operations rcug static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp) { seq_printf(m, "%3d%cnp=%ld " - "qsp=%ld cbr=%ld cng=%ld gpc=%ld gps=%ld nf=%ld nn=%ld\n", + "qsp=%ld rpq=%ld cbr=%ld cng=%ld " + "gpc=%ld gps=%ld nf=%ld nn=%ld\n", rdp->cpu, cpu_is_offline(rdp->cpu) ? '!' : ' ', rdp->n_rcu_pending, rdp->n_rp_qs_pending, + rdp->n_rp_report_qs, rdp->n_rp_cb_ready, rdp->n_rp_cpu_needs_gp, rdp->n_rp_gp_completed, diff -urpNa -X dontdiff linux-2.6.34-rc5/kernel/sched.c linux-2.6.34-rc5-4b402210-rcu/kernel/sched.c --- linux-2.6.34-rc5/kernel/sched.c 2010-04-23 10:40:19.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/kernel/sched.c 2010-05-08 08:02:22.000000000 -0700 @@ -323,6 +323,15 @@ static inline struct task_group *task_gr /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { + /* + * Strictly speaking this rcu_read_lock() is not needed since the + * task_group is tied to the cgroup, which in turn can never go away + * as long as there are tasks attached to it. + * + * However since task_group() uses task_subsys_state() which is an + * rcu_dereference() user, this quiets CONFIG_PROVE_RCU. 
+ */ + rcu_read_lock(); #ifdef CONFIG_FAIR_GROUP_SCHED p->se.cfs_rq = task_group(p)->cfs_rq[cpu]; p->se.parent = task_group(p)->se[cpu]; @@ -332,6 +341,7 @@ static inline void set_task_rq(struct ta p->rt.rt_rq = task_group(p)->rt_rq[cpu]; p->rt.parent = task_group(p)->rt_se[cpu]; #endif + rcu_read_unlock(); } #else @@ -3696,7 +3706,7 @@ need_resched: preempt_disable(); cpu = smp_processor_id(); rq = cpu_rq(cpu); - rcu_sched_qs(cpu); + rcu_note_context_switch(cpu); prev = rq->curr; switch_count = &prev->nivcsw; @@ -3780,7 +3790,7 @@ int mutex_spin_on_owner(struct mutex *lo * the mutex owner just released it and exited. */ if (probe_kernel_address(&owner->cpu, cpu)) - goto out; + return 0; #else cpu = owner->cpu; #endif @@ -3790,14 +3800,14 @@ int mutex_spin_on_owner(struct mutex *lo * the cpu field may no longer be valid. */ if (cpu >= nr_cpumask_bits) - goto out; + return 0; /* * We need to validate that we can do a * get_cpu() and that we have the percpu area. */ if (!cpu_online(cpu)) - goto out; + return 0; rq = cpu_rq(cpu); @@ -3816,7 +3826,7 @@ int mutex_spin_on_owner(struct mutex *lo cpu_relax(); } -out: + return 1; } #endif diff -urpNa -X dontdiff linux-2.6.34-rc5/kernel/sched_debug.c linux-2.6.34-rc5-4b402210-rcu/kernel/sched_debug.c --- linux-2.6.34-rc5/kernel/sched_debug.c 2010-04-23 10:40:19.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/kernel/sched_debug.c 2010-05-08 08:02:22.000000000 -0700 @@ -114,7 +114,9 @@ print_task(struct seq_file *m, struct rq { char path[64]; + rcu_read_lock(); cgroup_path(task_group(p)->css.cgroup, path, sizeof(path)); + rcu_read_unlock(); SEQ_printf(m, " %s", path); } #endif diff -urpNa -X dontdiff linux-2.6.34-rc5/kernel/softirq.c linux-2.6.34-rc5-4b402210-rcu/kernel/softirq.c --- linux-2.6.34-rc5/kernel/softirq.c 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/kernel/softirq.c 2010-05-08 08:02:22.000000000 -0700 @@ -716,7 +716,7 @@ static int run_ksoftirqd(void * __bind_c preempt_enable_no_resched(); cond_resched(); preempt_disable(); - rcu_sched_qs((long)__bind_cpu); + rcu_note_context_switch((long)__bind_cpu); } preempt_enable(); set_current_state(TASK_INTERRUPTIBLE); diff -urpNa -X dontdiff linux-2.6.34-rc5/lib/Kconfig.debug linux-2.6.34-rc5-4b402210-rcu/lib/Kconfig.debug --- linux-2.6.34-rc5/lib/Kconfig.debug 2010-04-23 10:40:19.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/lib/Kconfig.debug 2010-05-08 08:02:22.000000000 -0700 @@ -512,6 +512,18 @@ config PROVE_RCU Say N if you are unsure. +config PROVE_RCU_REPEATEDLY + bool "RCU debugging: don't disable PROVE_RCU on first splat" + depends on PROVE_RCU + default n + help + By itself, PROVE_RCU will disable checking upon issuing the + first warning (or "splat"). This feature prevents such + disabling, allowing multiple RCU-lockdep warnings to be printed + on a single reboot. + + Say N if you are unsure. + config LOCKDEP bool depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT @@ -793,7 +805,7 @@ config RCU_CPU_STALL_DETECTOR config RCU_CPU_STALL_VERBOSE bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR" depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU - default n + default y help This option causes RCU to printk detailed per-task information for any tasks that are stalling the current RCU grace period. 
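
The set_task_rq() and print_task() changes above, together with the new PROVE_RCU_REPEATEDLY option, all serve CONFIG_PROVE_RCU (RCU lockdep): task_group() goes through task_subsys_state(), which is an rcu_dereference() user, so the callers take rcu_read_lock()/rcu_read_unlock() purely to document the access and keep lockdep-RCU quiet, even though the task_group itself cannot go away there. As a rough illustration of that pattern (made-up names, not code from this series):

#include <linux/rcupdate.h>

/* Illustrative only: struct foo and friends are not part of this patch. */
struct foo_cfg {
	int threshold;
};

struct foo {
	struct foo_cfg *cfg;		/* published with rcu_assign_pointer() */
};

/*
 * rcu_dereference()-based accessor: under CONFIG_PROVE_RCU this splats
 * unless the caller is in an RCU read-side critical section (or uses
 * rcu_dereference_check() with a suitable lockdep expression).
 */
static struct foo_cfg *foo_get_cfg(struct foo *f)
{
	return rcu_dereference(f->cfg);
}

static int foo_read_threshold(struct foo *f)
{
	int t;

	/*
	 * Even when f->cfg is pinned by other means, the explicit
	 * read-side critical section documents the access and keeps
	 * lockdep-RCU quiet, as set_task_rq() and print_task() do above.
	 */
	rcu_read_lock();
	t = foo_get_cfg(f)->threshold;
	rcu_read_unlock();
	return t;
}

The memcontrol.c hunks further down take the same approach around css_id() and css_is_ancestor().
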
diff -urpNa -X dontdiff linux-2.6.34-rc5/MAINTAINERS linux-2.6.34-rc5-4b402210-rcu/MAINTAINERS --- linux-2.6.34-rc5/MAINTAINERS 2010-04-23 10:39:18.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/MAINTAINERS 2010-04-23 10:42:23.000000000 -0700 @@ -1960,7 +1960,7 @@ F: lib/kobj* DRM DRIVERS M: David Airlie -L: dri-devel@lists.sourceforge.net +L: dri-devel@lists.freedesktop.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git S: Maintained F: drivers/gpu/drm/ @@ -4791,12 +4791,11 @@ F: drivers/s390/crypto/ S390 ZFCP DRIVER M: Christof Schmitt -M: Martin Peschke +M: Swen Schillig M: linux390@de.ibm.com L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported -F: Documentation/s390/zfcpdump.txt F: drivers/s390/scsi/zfcp_* S390 IUCV NETWORK LAYER diff -urpNa -X dontdiff linux-2.6.34-rc5/mm/memcontrol.c linux-2.6.34-rc5-4b402210-rcu/mm/memcontrol.c --- linux-2.6.34-rc5/mm/memcontrol.c 2010-04-23 10:40:19.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/mm/memcontrol.c 2010-05-08 08:02:22.000000000 -0700 @@ -811,10 +811,12 @@ int task_in_mem_cgroup(struct task_struc * enabled in "curr" and "curr" is a child of "mem" in *cgroup* * hierarchy(even if use_hierarchy is disabled in "mem"). */ + rcu_read_lock(); if (mem->use_hierarchy) ret = css_is_ancestor(&curr->css, &mem->css); else ret = (curr == mem); + rcu_read_unlock(); css_put(&curr->css); return ret; } @@ -2312,7 +2314,9 @@ mem_cgroup_uncharge_swapcache(struct pag /* record memcg information */ if (do_swap_account && swapout && memcg) { + rcu_read_lock(); swap_cgroup_record(ent, css_id(&memcg->css)); + rcu_read_unlock(); mem_cgroup_get(memcg); } if (swapout && memcg) @@ -2369,8 +2373,10 @@ static int mem_cgroup_move_swap_account( { unsigned short old_id, new_id; + rcu_read_lock(); old_id = css_id(&from->css); new_id = css_id(&to->css); + rcu_read_unlock(); if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { mem_cgroup_swap_statistics(from, false); @@ -4038,11 +4044,16 @@ static int is_target_pte_for_mc(struct v put_page(page); } /* throught */ - if (ent.val && do_swap_account && !ret && - css_id(&mc.from->css) == lookup_swap_cgroup(ent)) { - ret = MC_TARGET_SWAP; - if (target) - target->ent = ent; + if (ent.val && do_swap_account && !ret) { + unsigned short id; + rcu_read_lock(); + id = css_id(&mc.from->css); + rcu_read_unlock(); + if (id == lookup_swap_cgroup(ent)) { + ret = MC_TARGET_SWAP; + if (target) + target->ent = ent; + } } return ret; } diff -urpNa -X dontdiff linux-2.6.34-rc5/security/inode.c linux-2.6.34-rc5-4b402210-rcu/security/inode.c --- linux-2.6.34-rc5/security/inode.c 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/security/inode.c 2010-04-23 10:42:24.000000000 -0700 @@ -161,13 +161,13 @@ static int create_by_name(const char *na mutex_lock(&parent->d_inode->i_mutex); *dentry = lookup_one_len(name, parent, strlen(name)); - if (!IS_ERR(dentry)) { + if (!IS_ERR(*dentry)) { if ((mode & S_IFMT) == S_IFDIR) error = mkdir(parent->d_inode, *dentry, mode); else error = create(parent->d_inode, *dentry, mode); } else - error = PTR_ERR(dentry); + error = PTR_ERR(*dentry); mutex_unlock(&parent->d_inode->i_mutex); return error; diff -urpNa -X dontdiff linux-2.6.34-rc5/security/keys/request_key.c linux-2.6.34-rc5-4b402210-rcu/security/keys/request_key.c --- linux-2.6.34-rc5/security/keys/request_key.c 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/security/keys/request_key.c 2010-05-08 08:02:22.000000000 -0700 @@ -68,7 +68,8 
@@ static int call_sbin_request_key(struct { const struct cred *cred = current_cred(); key_serial_t prkey, sskey; - struct key *key = cons->key, *authkey = cons->authkey, *keyring; + struct key *key = cons->key, *authkey = cons->authkey, *keyring, + *session; char *argv[9], *envp[3], uid_str[12], gid_str[12]; char key_str[12], keyring_str[3][12]; char desc[20]; @@ -112,10 +113,12 @@ static int call_sbin_request_key(struct if (cred->tgcred->process_keyring) prkey = cred->tgcred->process_keyring->serial; - if (cred->tgcred->session_keyring) - sskey = rcu_dereference(cred->tgcred->session_keyring)->serial; - else - sskey = cred->user->session_keyring->serial; + rcu_read_lock(); + session = rcu_dereference(cred->tgcred->session_keyring); + if (!session) + session = cred->user->session_keyring; + sskey = session->serial; + rcu_read_unlock(); sprintf(keyring_str[2], "%d", sskey); diff -urpNa -X dontdiff linux-2.6.34-rc5/security/keys/user_defined.c linux-2.6.34-rc5-4b402210-rcu/security/keys/user_defined.c --- linux-2.6.34-rc5/security/keys/user_defined.c 2010-02-24 10:52:17.000000000 -0800 +++ linux-2.6.34-rc5-4b402210-rcu/security/keys/user_defined.c 2010-05-08 08:02:22.000000000 -0700 @@ -199,7 +199,8 @@ long user_read(const struct key *key, ch struct user_key_payload *upayload; long ret; - upayload = rcu_dereference(key->payload.data); + upayload = rcu_dereference_protected( + key->payload.data, rwsem_is_locked(&((struct key *)key)->sem)); ret = upayload->datalen; /* we can return the data as is */ diff -urpNa -X dontdiff linux-2.6.34-rc5/virt/kvm/kvm_main.c linux-2.6.34-rc5-4b402210-rcu/virt/kvm/kvm_main.c --- linux-2.6.34-rc5/virt/kvm/kvm_main.c 2010-04-23 10:40:31.000000000 -0700 +++ linux-2.6.34-rc5-4b402210-rcu/virt/kvm/kvm_main.c 2010-04-23 10:42:24.000000000 -0700 @@ -341,7 +341,11 @@ static void kvm_mmu_notifier_release(str struct mm_struct *mm) { struct kvm *kvm = mmu_notifier_to_kvm(mn); + int idx; + + idx = srcu_read_lock(&kvm->srcu); kvm_arch_flush_shadow(kvm); + srcu_read_unlock(&kvm->srcu, idx); } static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { @@ -648,7 +652,7 @@ skip_lpage: /* Allocate page dirty bitmap if needed */ if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { - unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8; + unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new); new.dirty_bitmap = vmalloc(dirty_bytes); if (!new.dirty_bitmap) @@ -768,7 +772,7 @@ int kvm_get_dirty_log(struct kvm *kvm, { struct kvm_memory_slot *memslot; int r, i; - int n; + unsigned long n; unsigned long any = 0; r = -EINVAL; @@ -780,7 +784,7 @@ int kvm_get_dirty_log(struct kvm *kvm, if (!memslot->dirty_bitmap) goto out; - n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; + n = kvm_dirty_bitmap_bytes(memslot); for (i = 0; !any && i < n/sizeof(long); ++i) any = memslot->dirty_bitmap[i]; @@ -1186,10 +1190,13 @@ void mark_page_dirty(struct kvm *kvm, gf memslot = gfn_to_memslot_unaliased(kvm, gfn); if (memslot && memslot->dirty_bitmap) { unsigned long rel_gfn = gfn - memslot->base_gfn; + unsigned long *p = memslot->dirty_bitmap + + rel_gfn / BITS_PER_LONG; + int offset = rel_gfn % BITS_PER_LONG; /* avoid RMW */ - if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap)) - generic___set_le_bit(rel_gfn, memslot->dirty_bitmap); + if (!generic_test_le_bit(offset, p)) + generic___set_le_bit(offset, p); } }
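
The user_defined.c hunk above is the update-side counterpart of that pattern: where rcu_read_lock() is not held but the pointer is pinned by a lock, rcu_dereference_protected() carries a lockdep expression (there, that the key's rwsem is held) explaining why the access is safe. A minimal sketch of the combined reader/updater idiom, again with made-up names rather than code from this patch:

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Illustrative only: struct bar and friends are not part of this patch. */
struct bar_cfg {
	int limit;
};

struct bar {
	struct mutex lock;		/* serializes updates to ->cfg */
	struct bar_cfg *cfg;		/* readers access under RCU */
};

/* Read side: plain rcu_dereference() inside rcu_read_lock(). */
static int bar_read_limit(struct bar *b)
{
	int limit;

	rcu_read_lock();
	limit = rcu_dereference(b->cfg)->limit;
	rcu_read_unlock();
	return limit;
}

/* Update side: no rcu_read_lock(), the mutex pins the pointer instead. */
static int bar_replace_cfg(struct bar *b, int new_limit)
{
	struct bar_cfg *newc, *oldc;

	newc = kmalloc(sizeof(*newc), GFP_KERNEL);
	if (!newc)
		return -ENOMEM;
	newc->limit = new_limit;

	mutex_lock(&b->lock);
	/*
	 * rcu_dereference_protected() tells PROVE_RCU why this access is
	 * safe without rcu_read_lock(), just as user_read() does with
	 * rwsem_is_locked() above.
	 */
	oldc = rcu_dereference_protected(b->cfg, lockdep_is_held(&b->lock));
	rcu_assign_pointer(b->cfg, newc);
	mutex_unlock(&b->lock);

	/* Wait for pre-existing readers before freeing the old version. */
	synchronize_rcu();
	kfree(oldc);
	return 0;
}

An updater that cannot block would embed an rcu_head in bar_cfg and use call_rcu() instead of synchronize_rcu().
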